Diffstat (limited to 'src/dpdk/drivers/net')
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_dev.c    |  38
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_dev.h    |   3
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_devcmd.h | 346
-rw-r--r--  src/dpdk/drivers/net/enic/enic.h             |  14
-rw-r--r--  src/dpdk/drivers/net/enic/enic_clsf.c        | 352
-rw-r--r--  src/dpdk/drivers/net/enic/enic_ethdev.c      |   9
-rw-r--r--  src/dpdk/drivers/net/enic/enic_main.c        |   6
-rw-r--r--  src/dpdk/drivers/net/enic/enic_res.c         |   5
-rw-r--r--  src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c    |   6
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5.c             |  20
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5.h             |  31
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_autoconf.h    |   8
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_fdir.c        |  98
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_rxq.c         |   4
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_rxtx.c        |   4
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_rxtx.h        |   4
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_stats.c       | 321
17 files changed, 1110 insertions(+), 159 deletions(-)
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.c b/src/dpdk/drivers/net/enic/base/vnic_dev.c
index fc2e4cc3..e50b90e7 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.c
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.c
@@ -462,6 +462,18 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+ u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err)
+ return 0;
+ return (a1 >= (u32)FILTER_DPDK_1);
+}
+
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -655,7 +667,12 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+#define TREX_PATCH
+#ifdef TREX_PATCH
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
+#else
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+#endif
if (err)
pr_err("Can't set packet filter\n");
@@ -999,7 +1016,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* @data: filter data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data)
+ struct filter_v2 *data)
{
u64 a0, a1;
int wait = 1000;
@@ -1008,11 +1025,20 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_tlv *tlv, *tlv_va;
struct filter_action *action;
u64 tlv_size;
+ u32 filter_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
+ enum vnic_devcmd_cmd dev_cmd;
+
if (cmd == CLSF_ADD) {
- tlv_size = sizeof(struct filter) +
+ if (data->type == FILTER_DPDK_1)
+ dev_cmd = CMD_ADD_ADV_FILTER;
+ else
+ dev_cmd = CMD_ADD_FILTER;
+
+ filter_size = vnic_filter_size(data);
+ tlv_size = filter_size +
sizeof(struct filter_action) +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
@@ -1026,12 +1052,12 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
a1 = tlv_size;
memset(tlv, 0, tlv_size);
tlv->type = CLSF_TLV_FILTER;
- tlv->length = sizeof(struct filter);
- *(struct filter *)&tlv->val = *data;
+ tlv->length = filter_size;
+ memcpy(&tlv->val, (void *)data, filter_size);
tlv = (struct filter_tlv *)((char *)tlv +
sizeof(struct filter_tlv) +
- sizeof(struct filter));
+ filter_size);
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
@@ -1039,7 +1065,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
action->type = FILTER_ACTION_RQ_STEERING;
action->u.rq_idx = *entry;
- ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+ ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
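
For reference, a minimal sketch of how a caller might combine the new
capability probe with the filter_v2 classifier; the function name and the
filter contents are placeholders, not part of the driver:

    /* Probe for advanced filter support, then add a filter steering to
     * RQ rq_idx. On success *entry is overwritten with the filter id. */
    static int example_add_filter(struct vnic_dev *vdev, u16 rq_idx)
    {
        struct filter_v2 fltr;
        u16 entry = rq_idx;

        memset(&fltr, 0, sizeof(fltr));
        if (vnic_dev_capable_adv_filters(vdev))
            fltr.type = FILTER_DPDK_1;   /* generic 64-byte-per-layer match */
        else
            fltr.type = FILTER_IPV4_5TUPLE;
        /* ... fill fltr.u.generic_1 or fltr.u.ipv4 here ... */
        return vnic_dev_classifier(vdev, CLSF_ADD, &entry, &fltr);
    }
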
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.h b/src/dpdk/drivers/net/enic/base/vnic_dev.h
index 689442f3..06ebd4ce 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.h
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.h
@@ -134,6 +134,7 @@ void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -201,7 +202,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data);
+ struct filter_v2 *data);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/src/dpdk/drivers/net/enic/base/vnic_devcmd.h b/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
index b3d5a6cc..785fd6fd 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
+++ b/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* Copyright (c) 2014, Cisco Systems, Inc.
@@ -126,7 +126,8 @@ enum vnic_devcmd_cmd {
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
- * out: a0=value */
+ * out: a0=value
+ */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
@@ -146,8 +147,9 @@ enum vnic_devcmd_cmd {
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
- CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
@@ -387,9 +389,8 @@ enum vnic_devcmd_cmd {
* Subvnic migration from MQ <--> VF.
* Enable the LIF migration from MQ to VF and vice versa. MQ and VF
* indexes are statically bound at the time of initialization.
- * Based on the
- * direction of migration, the resources of either MQ or the VF shall
- * be attached to the LIF.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
* in: (u32)a0=Direction of Migration
* 0=> Migrate to VF
* 1=> Migrate to MQ
@@ -397,7 +398,6 @@ enum vnic_devcmd_cmd {
*/
CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
-
/*
* Register / Deregister the notification block for MQ subvnics
* in:
@@ -433,6 +433,10 @@ enum vnic_devcmd_cmd {
* in: (u64) a0= filter address
* (u32) a1= size of filter
* out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
*/
CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
@@ -471,23 +475,133 @@ enum vnic_devcmd_cmd {
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
/*
- * Enable/Disable overlay offloads on the given vnic
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (ui64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw, for features that need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
* in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
* a0 = OVERLAY_FEATURE_VXLAN : VxLAN
- * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable
- * a1 = OVERLAY_OFFLOAD_DISABLE : Disable
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
*/
- CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE =
- _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
/*
* Configuration of overlay offloads feature on a given vNIC
- * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
- * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
- * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
- * in: (u16) a2 = unsigned short int port information
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+ * Set cq array base and size in a list of consecutive wqs and
+ * rqs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
/* CMD_ENABLE2 flags */
@@ -520,6 +634,9 @@ enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if a failover to the standby happened
+ */
};
enum vnic_devcmd_error {
@@ -558,9 +675,9 @@ enum fwinfo_asic_type {
FWINFO_ASIC_TYPE_UNKNOWN,
FWINFO_ASIC_TYPE_PALO,
FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
};
-
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
@@ -595,25 +712,16 @@ struct vnic_devcmd_provinfo {
*/
#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
-#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4))
-
-#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4) | \
- FILTER_FIELD_VALID(5))
-
-#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2))
-
#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
struct filter_usnic_id {
u32 flags;
u16 vlan;
@@ -628,10 +736,18 @@ struct filter_usnic_id {
#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
/* Enums for the protocol field. */
enum protocol_e {
PROTO_UDP = 0,
PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
};
struct filter_ipv4_5tuple {
@@ -646,12 +762,78 @@ struct filter_ipv4_5tuple {
#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
struct filter_mac_vlan {
u32 flags;
u16 vlan;
u8 mac_addr[6];
} __attribute__((packed));
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care" */
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
@@ -670,6 +852,10 @@ enum filter_type {
FILTER_USNIC_ID = 0,
FILTER_IPV4_5TUPLE = 1,
FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
FILTER_MAX
};
@@ -679,6 +865,27 @@ struct filter {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct filter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
} u;
} __attribute__((packed));
@@ -687,14 +894,55 @@ enum {
CLSF_TLV_ACTION = 1,
};
-#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */
-
struct filter_tlv {
- uint32_t type;
- uint32_t length;
- uint32_t val[0];
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
};
+/* Data for CMD_ADD_FILTER is 2 TLVs plus the filter and action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline u_int32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+ u_int32_t size;
+
+ switch (fp->type) {
+ case FILTER_USNIC_ID:
+ size = sizeof(fp->u.usnic);
+ break;
+ case FILTER_IPV4_5TUPLE:
+ size = sizeof(fp->u.ipv4);
+ break;
+ case FILTER_MAC_VLAN:
+ case FILTER_NVGRE_VMQ:
+ size = sizeof(fp->u.mac_vlan);
+ break;
+ case FILTER_VLAN_IP_3TUPLE:
+ size = sizeof(fp->u.vlan_3tuple);
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ size = sizeof(fp->u.generic_1);
+ break;
+ default:
+ size = sizeof(fp->u);
+ break;
+ }
+ size += sizeof(fp->type);
+ return size;
+}
+
+
enum {
CLSF_ADD = 0,
CLSF_DEL = 1,
@@ -766,8 +1014,30 @@ typedef enum {
OVERLAY_FEATURE_MAX,
} overlay_feature_t;
-#define OVERLAY_OFFLOAD_ENABLE 0
-#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
#endif /* _VNIC_DEVCMD_H_ */
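
To make the mask/val semantics concrete, a hedged sketch of building a
FILTER_DPDK_1 that matches UDP destination port 53; the byte offsets assume a
standard UDP header placed at layer L4, mirroring how copy_fltr_v2() in
enic_clsf.c fills the layers:

    /* A zero mask bit means "don't care", so only the two dst-port bytes
     * of the L4 header take part in the match. */
    struct filter_v2 f;
    struct filter_generic_1 *gp = &f.u.generic_1;
    u16 port_be = rte_cpu_to_be_16(53);

    memset(&f, 0, sizeof(f));
    f.type = FILTER_DPDK_1;
    gp->mask_flags = FILTER_GENERIC_1_UDP;
    gp->val_flags = FILTER_GENERIC_1_UDP;
    memset(&gp->layer[FILTER_GENERIC_1_L4].mask[2], 0xff, 2); /* dst port */
    memcpy(&gp->layer[FILTER_GENERIC_1_L4].val[2], &port_be, 2);
    /* TLV length must come from vnic_filter_size(&f), never sizeof(f). */
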
diff --git a/src/dpdk/drivers/net/enic/enic.h b/src/dpdk/drivers/net/enic/enic.h
index 4c16ef17..c00d3fb8 100644
--- a/src/dpdk/drivers/net/enic/enic.h
+++ b/src/dpdk/drivers/net/enic/enic.h
@@ -92,6 +92,12 @@ struct enic_fdir {
struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+ u32 modes;
+ u32 types_mask;
+ void (*copy_fltr_fn)(struct filter_v2 *filt,
+ struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+
};
struct enic_soft_stats {
@@ -128,6 +134,7 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
+ u16 adv_filters;
unsigned int flags;
unsigned int priv_flags;
@@ -273,4 +280,11 @@ uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
+ __rte_unused struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
diff --git a/src/dpdk/drivers/net/enic/enic_clsf.c b/src/dpdk/drivers/net/enic/enic_clsf.c
index e6f57bea..8f68faab 100644
--- a/src/dpdk/drivers/net/enic/enic_clsf.c
+++ b/src/dpdk/drivers/net/enic/enic_clsf.c
@@ -38,6 +38,11 @@
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
#include "enic_compat.h"
#include "enic.h"
@@ -67,6 +72,296 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
*stats = enic->fdir.stats;
}
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+ info->mode = enic->fdir.modes;
+ info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+ enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ if (enic->adv_filters) {
+ enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ enic->fdir.copy_fltr_fn = copy_fltr_v2;
+ } else {
+ enic->fdir.copy_fltr_fn = copy_fltr_v1;
+ }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+ enum filter_generic_1_layer layer, void *mask, void *val,
+ unsigned int len)
+{
+ gp->mask_flags |= flag;
+ gp->val_flags |= gp->mask_flags;
+ memcpy(gp->layer[layer].mask, mask, len);
+ memcpy(gp->layer[layer].val, val, len);
+}
+
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support).
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks)
+{
+ fltr->type = FILTER_IPV4_5TUPLE;
+ fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.src_ip);
+ fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.dst_ip);
+ fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.src_port);
+ fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.dst_port);
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+ fltr->u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr->u.ipv4.protocol = PROTO_UDP;
+
+ fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
+
+#define TREX_PATCH
+#ifdef TREX_PATCH
+void
+copy_fltr_recv_all(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks) {
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ memset(gp, 0, sizeof(*gp));
+
+ struct ether_hdr eth_mask, eth_val;
+ memset(&eth_mask, 0, sizeof(eth_mask));
+ memset(&eth_val, 0, sizeof(eth_val));
+
+ eth_val.ether_type = 0x0806; /* value is ignored, because... */
+ eth_mask.ether_type = 0; /* ...an all-zero mask matches every ether type */
+
+ gp->position = 0;
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L2,
+ &eth_mask, &eth_val, sizeof(struct ether_hdr));
+
+}
+#endif
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support).
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks)
+{
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ int i;
+
+ RTE_ASSERT(enic->adv_filters);
+
+ fltr->type = FILTER_DPDK_1;
+ memset(gp, 0, sizeof(*gp));
+#ifdef TREX_PATCH
+ // Important for this to be below 2: if added with position 2,
+ // IPv6 UDP and ICMP seem to be caught by some other rule.
+ gp->position = 1;
+#endif
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp4_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp4_flow.src_port;
+ }
+ if (input->flow.udp4_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp4_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp4_flow.src_port;
+ }
+ if (input->flow.tcp4_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp4_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp4_flow.src_port;
+ }
+ if (input->flow.sctp4_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+ }
+ if (input->flow.sctp4_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+ }
+
+ /* SCTP is IP protocol 132; override ip4_flow.proto */
+ input->flow.ip4_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+ struct ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+ if (input->flow.ip4_flow.tos) {
+ ip4_mask.type_of_service = masks->ipv4_mask.tos;
+ ip4_val.type_of_service = input->flow.ip4_flow.tos;
+ }
+ if (input->flow.ip4_flow.ip_id) {
+ ip4_mask.packet_id = 0xffff;
+ ip4_val.packet_id = input->flow.ip4_flow.ip_id;
+ }
+ if (input->flow.ip4_flow.ttl) {
+ ip4_mask.time_to_live = 0xff;
+ ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+ }
+ if (input->flow.ip4_flow.proto) {
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ }
+ if (input->flow.ip4_flow.src_ip) {
+ ip4_mask.src_addr = masks->ipv4_mask.src_ip;
+ ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+ }
+ if (input->flow.ip4_flow.dst_ip) {
+ ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
+ ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+ &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp6_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp6_flow.src_port;
+ }
+ if (input->flow.udp6_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp6_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp6_flow.src_port;
+ }
+ if (input->flow.tcp6_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp6_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp6_flow.src_port;
+ }
+ if (input->flow.sctp6_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+ }
+ if (input->flow.sctp6_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+ }
+
+ /* SCTP is IP protocol 132; override ipv6_flow.proto */
+ input->flow.ipv6_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+ struct ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+ if (input->flow.ipv6_flow.proto) {
+ ipv6_mask.proto = masks->ipv6_mask.proto;
+ ipv6_val.proto = input->flow.ipv6_flow.proto;
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
+ masks->ipv6_mask.src_ip[i];
+ *(uint32_t *)&ipv6_val.src_addr[i * 4] =
+ input->flow.ipv6_flow.src_ip[i];
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
+ masks->ipv6_mask.dst_ip[i];
+ *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
+ input->flow.ipv6_flow.dst_ip[i];
+ }
+ if (input->flow.ipv6_flow.tc) {
+ ipv6_mask.vtc_flow = ((uint32_t)masks->ipv6_mask.tc << 12);
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
+ }
+ if (input->flow.ipv6_flow.hop_limits) {
+ ipv6_mask.hop_limits = 0xff;
+ ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+ &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ }
+}
+
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
int32_t pos;
@@ -77,11 +372,23 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
case -EINVAL:
case -ENOENT:
enic->fdir.stats.f_remove++;
+#ifdef TREX_PATCH
+ return pos;
+#else
return -EINVAL;
+#endif
default:
/* The entry is present in the table */
key = enic->fdir.nodes[pos];
+#ifdef TREX_PATCH
+ switch (params->soft_id) {
+ case 100:
+ // remove promisc when we delete 'receive all' filter
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 0, 1);
+ break;
+ }
+#endif
/* Delete the filter */
vnic_dev_classifier(enic->vdev, CLSF_DEL,
&key->fltr_id, NULL);
@@ -97,7 +404,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
struct enic_fdir_node *key;
- struct filter fltr = {0};
+ struct filter_v2 fltr;
int32_t pos;
u8 do_free = 0;
u16 old_fltr_id = 0;
@@ -105,9 +412,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u16 flex_bytes;
u16 queue;
- flowtype_supported = (
- (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
- (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+ memset(&fltr, 0, sizeof(fltr));
+ flowtype_supported = enic->fdir.types_mask
+ & (1 << params->input.flow_type);
flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
(params->input.flow_ext.flexbytes[0] & 0xFF));
@@ -120,7 +427,12 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
return -ENOTSUP;
}
- queue = params->action.rx_queue;
+ /* Get the enicpmd RQ from the DPDK Rx queue */
+ queue = enic_sop_rq(params->action.rx_queue);
+
+ if (!enic->rq[queue].in_use)
+ return -EINVAL;
+
/* See if the key is already there in the table */
pos = rte_hash_del_key(enic->fdir.hash, params);
switch (pos) {
@@ -183,22 +495,19 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
- fltr.type = FILTER_IPV4_5TUPLE;
- fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.src_ip);
- fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.dst_ip);
- fltr.u.ipv4.src_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.src_port);
- fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.dst_port);
-
- if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
- fltr.u.ipv4.protocol = PROTO_TCP;
- else
- fltr.u.ipv4.protocol = PROTO_UDP;
-
- fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+#ifdef TREX_PATCH
+ switch (params->soft_id) {
+ case 100:
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 1, 1);
+ copy_fltr_recv_all(&fltr, &params->input, &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+ break;
+ default:
+#endif
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+#ifdef TREX_PATCH
+ }
+#endif
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
@@ -238,6 +547,7 @@ void enic_clsf_destroy(struct enic *enic)
vnic_dev_classifier(enic->vdev, CLSF_DEL,
&key->fltr_id, NULL);
rte_free(key);
+ enic->fdir.nodes[index] = NULL;
}
}
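
For context, the soft_id == 100 convention above gives an application a
"receive all" hook. A minimal sketch of installing it through the standard
filter API follows; this is an illustration, not part of the patch, and the
flow type and queue are placeholder choices:

    #include <string.h>
    #include <rte_ethdev.h>

    static int example_install_recv_all(uint8_t port_id)
    {
        struct rte_eth_fdir_filter f;

        memset(&f, 0, sizeof(f));
        f.soft_id = 100;   /* magic value: enic_clsf.c enables promiscuous
                            * mode and adds a match-everything rule */
        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        f.action.rx_queue = 0;
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                       RTE_ETH_FILTER_ADD, &f);
    }
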
diff --git a/src/dpdk/drivers/net/enic/enic_ethdev.c b/src/dpdk/drivers/net/enic/enic_ethdev.c
index 47b07c92..6a86e23f 100644
--- a/src/dpdk/drivers/net/enic/enic_ethdev.c
+++ b/src/dpdk/drivers/net/enic/enic_ethdev.c
@@ -95,10 +95,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_INFO:
dev_warning(enic, "unsupported operation %u", filter_op);
ret = -ENOTSUP;
break;
+ case RTE_ETH_FILTER_INFO:
+ enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+ break;
default:
dev_err(enic, "unknown operation %u", filter_op);
ret = -EINVAL;
@@ -433,6 +435,9 @@ static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
enic_dev_stats_clear(enic);
}
+
+
+
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
@@ -459,6 +464,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
+
+ device_info->speed_capa = ETH_LINK_SPEED_40G;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
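
With RTE_ETH_FILTER_INFO now wired up, an application can ask which flow
types the VIC accepts before adding rules. A hedged sketch (the helper name
is illustrative only):

    #include <string.h>
    #include <rte_ethdev.h>

    static int example_has_ipv6_tcp_fdir(uint8_t port_id)
    {
        struct rte_eth_fdir_info info;

        memset(&info, 0, sizeof(info));
        if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                    RTE_ETH_FILTER_INFO, &info) != 0)
            return 0;
        /* IPv6 flow types are reported only when advanced filters exist */
        return !!(info.flow_types_mask[0] &
                  (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP));
    }
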
diff --git a/src/dpdk/drivers/net/enic/enic_main.c b/src/dpdk/drivers/net/enic/enic_main.c
index b4ca3710..473bfc3c 100644
--- a/src/dpdk/drivers/net/enic/enic_main.c
+++ b/src/dpdk/drivers/net/enic/enic_main.c
@@ -166,6 +166,7 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return;
}
+
/* The number of truncated packets can only be calculated by
* subtracting a hardware counter from error packets received by
* the driver. Note: this causes transient inaccuracies in the
@@ -180,7 +181,7 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
r_stats->opackets = stats->tx.tx_frames_ok;
- r_stats->ibytes = stats->rx.rx_bytes_ok;
+ r_stats->ibytes = stats->rx.rx_unicast_bytes_ok + stats->rx.rx_multicast_bytes_ok + stats->rx.rx_broadcast_bytes_ok;
r_stats->obytes = stats->tx.tx_bytes_ok;
r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
@@ -1122,6 +1123,9 @@ static int enic_dev_init(struct enic *enic)
return err;
}
+ /* Get the supported filters */
+ enic_fdir_info(enic);
+
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
diff --git a/src/dpdk/drivers/net/enic/enic_res.c b/src/dpdk/drivers/net/enic/enic_res.c
index 84c5d336..8a230a16 100644
--- a/src/dpdk/drivers/net/enic/enic_res.c
+++ b/src/dpdk/drivers/net/enic/enic_res.c
@@ -62,6 +62,7 @@ int enic_get_vnic_config(struct enic *enic)
return err;
}
+
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
@@ -98,6 +99,10 @@ int enic_get_vnic_config(struct enic *enic)
enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
max_t(u16, ENIC_MIN_MTU, c->mtu));
+ enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+ dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+ ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
index d478a159..72963a89 100644
--- a/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5784,13 +5784,17 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
-
+#define TREX_PATCH
+#ifndef TREX_PATCH
+ // No real reason to block this: we configure FDIR and ethertype
+ // rules that point to the same queue, so there are no race conditions.
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
" ethertype filter.", filter->ether_type);
return -EINVAL;
}
+#endif
if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
PMD_DRV_LOG(ERR, "mac compare is unsupported.");
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.c b/src/dpdk/drivers/net/mlx5/mlx5.c
index d96a9aff..303b917b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5.c
@@ -181,6 +181,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
+
+ mlx5_stats_free(dev);
+
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -366,6 +369,13 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
unsigned int mps;
int idx;
int i;
+ static int ibv_was_init = 0;
+
+ if (ibv_was_init == 0) {
+ ibv_fork_init();
+ ibv_was_init = 1;
+ }
+
(void)pci_drv;
assert(pci_drv == &mlx5_driver.pci_drv);
@@ -511,7 +521,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
+
+
err = mlx5_args(priv, pci_dev->devargs);
+
+ /* TREX PATCH */
+ /* Default to maximum-performance inline settings (overrides devargs). */
+ priv->txq_inline = 128;
+ priv->txqs_inline = 4;
+
+
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
@@ -751,7 +770,6 @@ rte_mlx5_pmd_init(const char *name, const char *args)
* using this PMD, which is not supported in forked processes.
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
- ibv_fork_init();
rte_eal_pci_register(&mlx5_driver.pci_drv);
return 0;
}
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.h b/src/dpdk/drivers/net/mlx5/mlx5.h
index 3a866098..68bad904 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5.h
@@ -84,6 +84,34 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
};
+struct mlx5_stats_priv {
+
+ struct rte_eth_stats m_shadow;
+ uint32_t n_stats; /* number of counters */
+
+ void *et_stats; /* points to the ethtool counters (struct ethtool_stats) */
+
+ /* index into ethtool */
+ uint16_t inx_rx_vport_unicast_bytes;
+ uint16_t inx_rx_vport_multicast_bytes;
+ uint16_t inx_rx_vport_broadcast_bytes;
+ uint16_t inx_rx_vport_unicast_packets;
+ uint16_t inx_rx_vport_multicast_packets;
+ uint16_t inx_rx_vport_broadcast_packets;
+ uint16_t inx_tx_vport_unicast_bytes;
+ uint16_t inx_tx_vport_multicast_bytes;
+ uint16_t inx_tx_vport_broadcast_bytes;
+ uint16_t inx_tx_vport_unicast_packets;
+ uint16_t inx_tx_vport_multicast_packets;
+ uint16_t inx_tx_vport_broadcast_packets;
+ uint16_t inx_rx_wqe_err;
+ uint16_t inx_rx_crc_errors_phy;
+ uint16_t inx_rx_in_range_len_errors_phy;
+ uint16_t inx_rx_symbol_err_phy;
+ uint16_t inx_tx_errors_phy;
+};
+
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
@@ -135,6 +163,7 @@ struct priv {
unsigned int reta_idx_n; /* RETA index size. */
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
rte_spinlock_t lock; /* Lock for control functions. */
+ struct mlx5_stats_priv m_stats;
};
/* Local storage for secondary process data. */
@@ -243,6 +272,8 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
+void mlx5_stats_free(struct rte_eth_dev *dev);
+
/* mlx5_vlan.c */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
new file mode 100644
index 00000000..9fdfff84
--- /dev/null
+++ b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
@@ -0,0 +1,8 @@
+#ifndef HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE
+#define HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE 1
+#endif /* HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE */
+
+#ifndef HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE
+#define HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE 1
+#endif /* HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE */
+
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
index 73eb00ec..4ba3bb9f 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
@@ -37,12 +37,14 @@
#include <string.h>
#include <errno.h>
+#define TREX_PATCH
+
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
-#include <infiniband/verbs.h>
+#include <infiniband/verbs_exp.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -67,6 +69,10 @@ struct fdir_flow_desc {
uint16_t src_port;
uint32_t src_ip[4];
uint32_t dst_ip[4];
+ uint8_t tos;
+ uint8_t ip_id;
+ uint8_t proto;
+
uint8_t mac[6];
uint16_t vlan_tag;
enum hash_rxq_type type;
@@ -102,6 +108,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
/* Set MAC address. */
+#ifndef TREX_PATCH
if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
rte_memcpy(desc->mac,
fdir_filter->input.flow.mac_vlan_flow.mac_addr.
@@ -110,6 +117,13 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
desc->type = HASH_RXQ_ETH;
return;
}
+#else
+ if (fdir_filter->input.flow.ip4_flow.ip_id == 2) {
+ desc->type = HASH_RXQ_ETH;
+ desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+ return;
+ }
+#endif
/* Set mode */
switch (fdir_filter->input.flow_type) {
@@ -141,9 +155,13 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
+ desc->tos = fdir_filter->input.flow.ip4_flow.ttl; /* TTL is mapped to TOS - TREX_PATCH */
+ desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+ desc->proto = fdir_filter->input.flow.ip4_flow.proto;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -157,12 +175,17 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
rte_memcpy(desc->dst_ip,
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
+ desc->tos = (uint8_t)fdir_filter->input.flow.ipv6_flow.hop_limits; /* TTL is mapped to TOS - TREX_PATCH */
+ desc->ip_id = (uint8_t)fdir_filter->input.flow.ipv6_flow.flow_label;
+ desc->proto = fdir_filter->input.flow.ipv6_flow.proto;
+
break;
default:
break;
}
}
+
/**
* Check if two flow descriptors overlap according to configured mask.
*
@@ -197,6 +220,12 @@ priv_fdir_overlap(const struct priv *priv,
((desc1->dst_port & mask->dst_port_mask) !=
(desc2->dst_port & mask->dst_port_mask)))
return 0;
+
+ if ((desc1->tos != desc2->tos) ||
+ (desc1->ip_id != desc2->ip_id) ||
+ (desc1->proto != desc2->proto))
+ return 0;
+
switch (desc1->type) {
case HASH_RXQ_IPV4:
case HASH_RXQ_UDPV4:
@@ -204,8 +233,9 @@ priv_fdir_overlap(const struct priv *priv,
if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
(desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
- (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
+ (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
+
break;
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
@@ -251,8 +281,8 @@ priv_fdir_flow_add(struct priv *priv,
struct ibv_exp_flow_attr *attr = &data->attr;
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
- struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
- struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext *spec_ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext *spec_ipv6;
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
@@ -264,8 +294,10 @@ priv_fdir_flow_add(struct priv *priv,
(iter_fdir_filter->flow != NULL) &&
(priv_fdir_overlap(priv,
&mlx5_fdir_filter->desc,
- &iter_fdir_filter->desc)))
- return EEXIST;
+ &iter_fdir_filter->desc))) {
+ ERROR("overlapping rules; please check your rules");
+ return EEXIST;
+ }
/*
* No padding must be inserted by the compiler between attr and spec.
@@ -288,6 +320,7 @@ priv_fdir_flow_add(struct priv *priv,
/* Update priority */
attr->priority = 2;
+#ifndef TREX_PATCH
if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
/* MAC Address */
for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
@@ -297,6 +330,14 @@ priv_fdir_flow_add(struct priv *priv,
}
goto create_flow;
}
+#else
+ // An empty mask means "match everything": this rule will match all packets, whatever the ether type.
+ if (desc->ip_id == 2) {
+ spec_eth->val.ether_type = 0x0806;
+ spec_eth->mask.ether_type = 0x0000;
+ goto create_flow;
+ }
+#endif
switch (desc->type) {
case HASH_RXQ_IPV4:
@@ -305,10 +346,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
+ spec_ipv4 = (struct ibv_exp_flow_spec_ipv4_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
+ assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4_EXT);
assert(spec_ipv4->size == sizeof(*spec_ipv4));
spec_ipv4->val.src_ip =
@@ -318,6 +359,21 @@ priv_fdir_flow_add(struct priv *priv,
spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
+ /* PROTO */
+ spec_ipv4->val.proto = desc->proto & mask->ipv4_mask.proto;
+ spec_ipv4->mask.proto = mask->ipv4_mask.proto;
+
+#ifdef TREX_PATCH
+ /* TOS */
+ if (desc->ip_id == 1) {
+ spec_ipv4->mask.tos = 0x1;
+ spec_ipv4->val.tos = 0x1;
+ } else {
+ spec_ipv4->mask.tos = 0x0;
+ spec_ipv4->val.tos = 0x0;
+ }
+ // spec_ipv4->val.tos = desc->tos & spec_ipv4->mask.tos;// & mask->ipv4_mask.tos;
+#endif
/* Update priority */
attr->priority = 1;
@@ -332,10 +388,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
+ spec_ipv6 = (struct ibv_exp_flow_spec_ipv6_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
+ assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6_EXT);
assert(spec_ipv6->size == sizeof(*spec_ipv6));
for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
@@ -351,6 +407,20 @@ priv_fdir_flow_add(struct priv *priv,
mask->ipv6_mask.dst_ip,
sizeof(spec_ipv6->mask.dst_ip));
+ spec_ipv6->val.next_hdr = desc->proto & mask->ipv6_mask.proto;
+ spec_ipv6->mask.next_hdr = mask->ipv6_mask.proto;
+
+#ifdef TREX_PATCH
+ /* TOS */
+ if (desc->ip_id == 1) {
+ spec_ipv6->mask.traffic_class = 0x1;
+ spec_ipv6->val.traffic_class = 0x1;
+ } else {
+ spec_ipv6->mask.traffic_class = 0;
+ spec_ipv6->val.traffic_class = 0;
+ }
+#endif
+
/* Update priority */
attr->priority = 1;
@@ -722,8 +792,10 @@ priv_fdir_filter_add(struct priv *priv,
/* Duplicate filters are currently unsupported. */
mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
if (mlx5_fdir_filter != NULL) {
+#ifndef TREX_PATCH
ERROR("filter already exists");
- return EINVAL;
+#endif
+ return EEXIST;
}
/* Create new flow director filter. */
@@ -847,9 +919,11 @@ priv_fdir_filter_delete(struct priv *priv,
return 0;
}
+#ifndef TREX_PATCH
ERROR("%p: flow director delete failed, cannot find filter",
(void *)priv);
- return EINVAL;
+#endif
+ return ENOENT;
}
/**
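
The ip4_flow.ip_id field is repurposed above as a control channel: 2 requests
the match-everything L2 rule, 1 requests the TOS/traffic-class LSB match. A
hedged sketch of requesting the "receive all" variant (function name and
queue are illustrative):

    #include <string.h>
    #include <rte_ethdev.h>

    static int example_mlx5_recv_all(uint8_t port_id)
    {
        struct rte_eth_fdir_filter f;

        memset(&f, 0, sizeof(f));
        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
        f.input.flow.ip4_flow.ip_id = 2;   /* TREX_PATCH match-all marker */
        f.action.rx_queue = 1;             /* arbitrary queue for the sketch */
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                       RTE_ETH_FILTER_ADD, &f);
    }
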
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
index 29c137cd..6be01d39 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
@@ -102,7 +102,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV4),
.flow_priority = 1,
.flow_spec.ipv4 = {
- .type = IBV_EXP_FLOW_SPEC_IPV4,
+ .type = IBV_EXP_FLOW_SPEC_IPV4_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
@@ -140,7 +140,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV6),
.flow_priority = 1,
.flow_spec.ipv6 = {
- .type = IBV_EXP_FLOW_SPEC_IPV6,
+ .type = IBV_EXP_FLOW_SPEC_IPV6_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
index fce3381a..c0bcfd03 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
@@ -908,7 +908,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->wqe->mpw.eseg.rsvd2 = 0;
mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
- MLX5_OPCODE_LSO_MPW);
+ MLX5_OPCODE_TSO);
mpw->wqe->mpw.ctrl.data[2] = 0;
mpw->wqe->mpw.ctrl.data[3] = 0;
mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
@@ -1107,7 +1107,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->wqe = &(*txq->wqes)[idx];
mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
- MLX5_OPCODE_LSO_MPW);
+ MLX5_OPCODE_TSO);
mpw->wqe->mpw_inl.ctrl.data[2] = 0;
mpw->wqe->mpw_inl.ctrl.data[3] = 0;
mpw->wqe->mpw_inl.eseg.mss = htons(length);
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
index f6e2cbac..d87dd19b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
@@ -173,8 +173,8 @@ struct hash_rxq_init {
uint16_t size;
} hdr;
struct ibv_exp_flow_spec_tcp_udp tcp_udp;
- struct ibv_exp_flow_spec_ipv4 ipv4;
- struct ibv_exp_flow_spec_ipv6 ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext ipv6;
struct ibv_exp_flow_spec_eth eth;
} flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
index 2d3cb519..788ef939 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_stats.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
@@ -44,6 +44,10 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
/**
* DPDK callback to get device statistics.
*
@@ -52,60 +56,241 @@
* @param[out] stats
* Stats structure output buffer.
*/
+
+
+static void
+mlx5_stats_read_hw(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats){
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ unsigned int i;
+
+ struct rte_eth_stats tmp = {0};
+ struct ethtool_stats *et_stats = (struct ethtool_stats *)lps->et_stats;
+ struct ifreq ifr;
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = lps->n_stats;
+
+ ifr.ifr_data = (caddr_t) et_stats;
+
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic values for mlnx5 ");
+ }
+
+ tmp.ibytes += et_stats->data[lps->inx_rx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_broadcast_bytes];
+
+ tmp.ipackets += et_stats->data[lps->inx_rx_vport_unicast_packets] +
+ et_stats->data[lps->inx_rx_vport_multicast_packets] +
+ et_stats->data[lps->inx_rx_vport_broadcast_packets];
+
+ tmp.ierrors += (et_stats->data[lps->inx_rx_wqe_err] +
+ et_stats->data[lps->inx_rx_crc_errors_phy] +
+ et_stats->data[lps->inx_rx_in_range_len_errors_phy] +
+ et_stats->data[lps->inx_rx_symbol_err_phy]);
+
+ tmp.obytes += et_stats->data[lps->inx_tx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_broadcast_bytes];
+
+ tmp.opackets += (et_stats->data[lps->inx_tx_vport_unicast_packets] +
+ et_stats->data[lps->inx_tx_vport_multicast_packets] +
+ et_stats->data[lps->inx_tx_vport_broadcast_packets]);
+
+ tmp.oerrors += et_stats->data[lps->inx_tx_errors_phy];
+
+ /* SW Rx */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ if (rxq) {
+ tmp.imissed += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ }
+
+ /* SW Tx */
+ for (i = 0; (i != priv->txqs_n); ++i) {
+ struct txq *txq = (*priv->txqs)[i];
+ if (txq) {
+ tmp.oerrors += txq->stats.odropped;
+ }
+ }
+
+ *stats = tmp;
+}
+
+void
+mlx5_stats_free(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ if (lps->et_stats) {
+ free(lps->et_stats);
+ lps->et_stats = NULL;
+ }
+}
+
+
+static void
+mlx5_stats_init(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ struct rte_eth_stats tmp = {0};
+
+ unsigned int i;
+ unsigned int idx;
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr;
+
+ struct ethtool_stats *et_stats = NULL;
+ struct ethtool_drvinfo drvinfo;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int n_stats, sz_str, sz_stats;
+
+ if (priv_get_ifname(priv, &ifname)) {
+ WARN("unable to get interface name");
+ return;
+ }
+ /* How many statistics are available? */
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t) &drvinfo;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get driver info for %s", ifname);
+ return;
+ }
+
+ n_stats = drvinfo.n_stats;
+ if (n_stats < 1) {
+ WARN("no statistics available for %s", ifname);
+ return;
+ }
+ lps->n_stats = n_stats;
+
+ /* Allocate memory to grab stat names and values */
+ sz_str = n_stats * ETH_GSTRING_LEN;
+ sz_stats = n_stats * sizeof(uint64_t);
+ strings = calloc(1, sz_str + sizeof(struct ethtool_gstrings));
+ if (!strings) {
+ WARN("unable to allocate memory for strings");
+ return;
+ }
+
+ et_stats = calloc(1, sz_stats + sizeof(struct ethtool_stats));
+ if (!et_stats) {
+ free(strings);
+ WARN("unable to allocate memory for stats");
+ return;
+ }
+
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = n_stats;
+ ifr.ifr_data = (caddr_t) strings;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic names for %s", ifname);
+ free(strings);
+ free(et_stats);
+ return;
+ }
+
+ for (i = 0; (i != n_stats); ++i) {
+
+ const char * curr_string = (const char*) &(strings->data[i * ETH_GSTRING_LEN]);
+
+ if (!strcmp("rx_vport_unicast_bytes", curr_string)) lps->inx_rx_vport_unicast_bytes = i;
+ if (!strcmp("rx_vport_multicast_bytes", curr_string)) lps->inx_rx_vport_multicast_bytes = i;
+ if (!strcmp("rx_vport_broadcast_bytes", curr_string)) lps->inx_rx_vport_broadcast_bytes = i;
+
+ if (!strcmp("rx_vport_unicast_packets", curr_string)) lps->inx_rx_vport_unicast_packets = i;
+ if (!strcmp("rx_vport_multicast_packets", curr_string)) lps->inx_rx_vport_multicast_packets = i;
+ if (!strcmp("rx_vport_broadcast_packets", curr_string)) lps->inx_rx_vport_broadcast_packets = i;
+
+ if (!strcmp("tx_vport_unicast_bytes", curr_string)) lps->inx_tx_vport_unicast_bytes = i;
+ if (!strcmp("tx_vport_multicast_bytes", curr_string)) lps->inx_tx_vport_multicast_bytes = i;
+ if (!strcmp("tx_vport_broadcast_bytes", curr_string)) lps->inx_tx_vport_broadcast_bytes = i;
+
+ if (!strcmp("tx_vport_unicast_packets", curr_string)) lps->inx_tx_vport_unicast_packets = i;
+ if (!strcmp("tx_vport_multicast_packets", curr_string)) lps->inx_tx_vport_multicast_packets = i;
+ if (!strcmp("tx_vport_broadcast_packets", curr_string)) lps->inx_tx_vport_broadcast_packets = i;
+
+ if (!strcmp("rx_wqe_err", curr_string)) lps->inx_rx_wqe_err = i;
+ if (!strcmp("rx_crc_errors_phy", curr_string)) lps->inx_rx_crc_errors_phy = i;
+ if (!strcmp("rx_in_range_len_errors_phy", curr_string)) lps->inx_rx_in_range_len_errors_phy = i;
+ if (!strcmp("rx_symbol_err_phy", curr_string)) lps->inx_rx_symbol_err_phy = i;
+
+ if (!strcmp("tx_errors_phy", curr_string)) lps->inx_tx_errors_phy = i;
+ }
+
+ lps->et_stats =(void *)et_stats;
+
+ if (!lps->inx_rx_vport_unicast_bytes ||
+ !lps->inx_rx_vport_multicast_bytes ||
+ !lps->inx_rx_vport_broadcast_bytes ||
+ !lps->inx_rx_vport_unicast_packets ||
+ !lps->inx_rx_vport_multicast_packets ||
+ !lps->inx_rx_vport_broadcast_packets ||
+ !lps->inx_tx_vport_unicast_bytes ||
+ !lps->inx_tx_vport_multicast_bytes ||
+ !lps->inx_tx_vport_broadcast_bytes ||
+ !lps->inx_tx_vport_unicast_packets ||
+ !lps->inx_tx_vport_multicast_packets ||
+ !lps->inx_tx_vport_broadcast_packets ||
+ !lps->inx_rx_wqe_err ||
+ !lps->inx_rx_crc_errors_phy ||
+ !lps->inx_rx_in_range_len_errors_phy) {
+ WARN("Counters are not recognized %s", ifname);
+ return;
+ }
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ /* copy to shadow the first time */
+ lps->m_shadow = tmp;
+
+ free(strings);
+}
+
+
+static void
+mlx5_stats_diff(struct rte_eth_stats *a,
+ struct rte_eth_stats *b,
+ struct rte_eth_stats *c){
+ #define MLX5_DIFF(cnt) { a->cnt = (b->cnt - c->cnt); }
+
+ MLX5_DIFF(ipackets);
+ MLX5_DIFF(opackets);
+ MLX5_DIFF(ibytes);
+ MLX5_DIFF(obytes);
+ MLX5_DIFF(imissed);
+
+ MLX5_DIFF(ierrors);
+ MLX5_DIFF(oerrors);
+ MLX5_DIFF(rx_nombuf);
+}
+
+
void
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rte_eth_stats tmp = {0};
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- /* Add software counters. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
-
- if (rxq == NULL)
- continue;
- idx = rxq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_ipackets[idx] += rxq->stats.ipackets;
- tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
- tmp.q_errors[idx] += (rxq->stats.idropped +
- rxq->stats.rx_nombuf);
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.ipackets += rxq->stats.ipackets;
- tmp.ibytes += rxq->stats.ibytes;
-#endif
- tmp.ierrors += rxq->stats.idropped;
- tmp.rx_nombuf += rxq->stats.rx_nombuf;
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
-
- if (txq == NULL)
- continue;
- idx = txq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_opackets[idx] += txq->stats.opackets;
- tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
- tmp.q_errors[idx] += txq->stats.odropped;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.opackets += txq->stats.opackets;
- tmp.obytes += txq->stats.obytes;
-#endif
- tmp.oerrors += txq->stats.odropped;
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: retrieve and add hardware counters. */
-#endif
- *stats = tmp;
+
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ mlx5_stats_diff(stats,
+ &tmp,
+ &lps->m_shadow);
+
priv_unlock(priv);
}
@@ -119,26 +304,20 @@ void
mlx5_stats_reset(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- idx = (*priv->rxqs)[i]->stats.idx;
- (*priv->rxqs)[i]->stats =
- (struct mlx5_rxq_stats){ .idx = idx };
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- if ((*priv->txqs)[i] == NULL)
- continue;
- idx = (*priv->txqs)[i]->stats.idx;
- (*priv->txqs)[i]->stats =
- (struct mlx5_txq_stats){ .idx = idx };
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: reset hardware counters. */
-#endif
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ /* copy to shadow */
+ lps->m_shadow = tmp;
+
priv_unlock(priv);
}