author     Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-12-08 14:07:29 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-12-08 14:10:05 +0100
commit     6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9 (patch)
tree       1b1fb3f903b2282e261ade69e3c17952b3fd3464 /drivers/net/enic
parent     32e04ea00cd159613e04acef75e52bfca6eeff2f (diff)
Imported Upstream version 16.11
Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/enic')
-rw-r--r--  drivers/net/enic/base/vnic_dev.c       33
-rw-r--r--  drivers/net/enic/base/vnic_dev.h        3
-rw-r--r--  drivers/net/enic/base/vnic_devcmd.h   346
-rw-r--r--  drivers/net/enic/base/vnic_rq.h         1
-rw-r--r--  drivers/net/enic/enic.h                34
-rw-r--r--  drivers/net/enic/enic_clsf.c          292
-rw-r--r--  drivers/net/enic/enic_ethdev.c         79
-rw-r--r--  drivers/net/enic/enic_main.c          222
-rw-r--r--  drivers/net/enic/enic_res.c             5
-rw-r--r--  drivers/net/enic/enic_rxtx.c           60
10 files changed, 917 insertions, 158 deletions
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 4db21a4d..84e4840a 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -470,6 +470,18 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+ u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err)
+ return 0;
+ return (a1 >= (u32)FILTER_DPDK_1);
+}
+
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -1007,7 +1019,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* @data: filter data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data)
+ struct filter_v2 *data)
{
u64 a0, a1;
int wait = 1000;
@@ -1016,11 +1028,20 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_tlv *tlv, *tlv_va;
struct filter_action *action;
u64 tlv_size;
+ u32 filter_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
+ enum vnic_devcmd_cmd dev_cmd;
+
if (cmd == CLSF_ADD) {
- tlv_size = sizeof(struct filter) +
+ if (data->type == FILTER_DPDK_1)
+ dev_cmd = CMD_ADD_ADV_FILTER;
+ else
+ dev_cmd = CMD_ADD_FILTER;
+
+ filter_size = vnic_filter_size(data);
+ tlv_size = filter_size +
sizeof(struct filter_action) +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
@@ -1034,12 +1055,12 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
a1 = tlv_size;
memset(tlv, 0, tlv_size);
tlv->type = CLSF_TLV_FILTER;
- tlv->length = sizeof(struct filter);
- *(struct filter *)&tlv->val = *data;
+ tlv->length = filter_size;
+ memcpy(&tlv->val, (void *)data, filter_size);
tlv = (struct filter_tlv *)((char *)tlv +
sizeof(struct filter_tlv) +
- sizeof(struct filter));
+ filter_size);
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
@@ -1047,7 +1068,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
action->type = FILTER_ACTION_RQ_STEERING;
action->u.rq_idx = *entry;
- ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+ ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
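
For context on the reworked classifier call above, here is an illustrative caller sketch (a hypothetical helper, not part of this patch) that probes firmware for advanced-filter support before submitting a FILTER_DPDK_1 entry; it assumes the driver's base headers are on the include path and that *entry carries the target RQ index on input and the firmware-assigned filter id on output, as in vnic_dev_classifier() above:

    #include <stdint.h>
    #include "vnic_dev.h"
    #include "vnic_devcmd.h"

    /* Hypothetical helper: add a classifier entry steering matches to rq_idx.
     * Returns the firmware filter id (needed later for CLSF_DEL) or -1.
     */
    static int example_clsf_add(struct vnic_dev *vdev, uint16_t rq_idx,
                                struct filter_v2 *fltr)
    {
        uint16_t entry = rq_idx;    /* in: RQ index, out: filter id */

        /* FILTER_DPDK_1 needs CMD_ADD_ADV_FILTER support in firmware */
        if (fltr->type == FILTER_DPDK_1 &&
            !vnic_dev_capable_adv_filters(vdev))
            return -1;

        if (vnic_dev_classifier(vdev, CLSF_ADD, &entry, fltr))
            return -1;

        return entry;
    }
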
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 689442f3..06ebd4ce 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -134,6 +134,7 @@ void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -201,7 +202,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data);
+ struct filter_v2 *data);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index b3d5a6cc..785fd6fd 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* Copyright (c) 2014, Cisco Systems, Inc.
@@ -126,7 +126,8 @@ enum vnic_devcmd_cmd {
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
- * out: a0=value */
+ * out: a0=value
+ */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
@@ -146,8 +147,9 @@ enum vnic_devcmd_cmd {
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
- CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
@@ -387,9 +389,8 @@ enum vnic_devcmd_cmd {
* Subvnic migration from MQ <--> VF.
* Enable the LIF migration from MQ to VF and vice versa. MQ and VF
* indexes are statically bound at the time of initialization.
- * Based on the
- * direction of migration, the resources of either MQ or the VF shall
- * be attached to the LIF.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
* in: (u32)a0=Direction of Migration
* 0=> Migrate to VF
* 1=> Migrate to MQ
@@ -397,7 +398,6 @@ enum vnic_devcmd_cmd {
*/
CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
-
/*
* Register / Deregister the notification block for MQ subvnics
* in:
@@ -433,6 +433,10 @@ enum vnic_devcmd_cmd {
* in: (u64) a0= filter address
* (u32) a1= size of filter
* out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
*/
CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
@@ -471,23 +475,133 @@ enum vnic_devcmd_cmd {
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
/*
- * Enable/Disable overlay offloads on the given vnic
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (u64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw for features that need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
* in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
* a0 = OVERLAY_FEATURE_VXLAN : VxLAN
- * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable
- * a1 = OVERLAY_OFFLOAD_DISABLE : Disable
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
*/
- CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE =
- _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
/*
* Configuration of overlay offloads feature on a given vNIC
- * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
- * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
- * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
- * in: (u16) a2 = unsigned short int port information
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+ * Set cq array base and size in a list of consecutive wqs and
+ * rqs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
/* CMD_ENABLE2 flags */
@@ -520,6 +634,9 @@ enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if a failover to the standby happened
+ */
};
enum vnic_devcmd_error {
@@ -558,9 +675,9 @@ enum fwinfo_asic_type {
FWINFO_ASIC_TYPE_UNKNOWN,
FWINFO_ASIC_TYPE_PALO,
FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
};
-
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
@@ -595,25 +712,16 @@ struct vnic_devcmd_provinfo {
*/
#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
-#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4))
-
-#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4) | \
- FILTER_FIELD_VALID(5))
-
-#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2))
-
#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
struct filter_usnic_id {
u32 flags;
u16 vlan;
@@ -628,10 +736,18 @@ struct filter_usnic_id {
#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
/* Enums for the protocol field. */
enum protocol_e {
PROTO_UDP = 0,
PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
};
struct filter_ipv4_5tuple {
@@ -646,12 +762,78 @@ struct filter_ipv4_5tuple {
#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
struct filter_mac_vlan {
u32 flags;
u16 vlan;
u8 mac_addr[6];
} __attribute__((packed));
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care"*/
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
@@ -670,6 +852,10 @@ enum filter_type {
FILTER_USNIC_ID = 0,
FILTER_IPV4_5TUPLE = 1,
FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
FILTER_MAX
};
@@ -679,6 +865,27 @@ struct filter {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct filter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
} u;
} __attribute__((packed));
@@ -687,14 +894,55 @@ enum {
CLSF_TLV_ACTION = 1,
};
-#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */
-
struct filter_tlv {
- uint32_t type;
- uint32_t length;
- uint32_t val[0];
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
};
+/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline u_int32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+ u_int32_t size;
+
+ switch (fp->type) {
+ case FILTER_USNIC_ID:
+ size = sizeof(fp->u.usnic);
+ break;
+ case FILTER_IPV4_5TUPLE:
+ size = sizeof(fp->u.ipv4);
+ break;
+ case FILTER_MAC_VLAN:
+ case FILTER_NVGRE_VMQ:
+ size = sizeof(fp->u.mac_vlan);
+ break;
+ case FILTER_VLAN_IP_3TUPLE:
+ size = sizeof(fp->u.vlan_3tuple);
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ size = sizeof(fp->u.generic_1);
+ break;
+ default:
+ size = sizeof(fp->u);
+ break;
+ }
+ size += sizeof(fp->type);
+ return size;
+}
+
+
enum {
CLSF_ADD = 0,
CLSF_DEL = 1,
@@ -766,8 +1014,30 @@ typedef enum {
OVERLAY_FEATURE_MAX,
} overlay_feature_t;
-#define OVERLAY_OFFLOAD_ENABLE 0
-#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
#endif /* _VNIC_DEVCMD_H_ */
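
To make the new generic (FILTER_DPDK_1) layout above concrete, here is a minimal sketch, assuming the definitions in this header and DPDK's rte_udp.h, that builds a filter_v2 matching only on the UDP destination port; every mask byte left at zero means "don't care":

    #include <stdint.h>
    #include <string.h>
    #include <rte_udp.h>
    #include "vnic_devcmd.h"

    /* Illustrative only: match UDP packets on a given destination port
     * (dport_be is in network byte order, as in struct udp_hdr).
     */
    static void example_fill_generic_udp_dport(struct filter_v2 *fltr,
                                               uint16_t dport_be)
    {
        struct filter_generic_1 *gp = &fltr->u.generic_1;
        struct udp_hdr mask, val;

        memset(fltr, 0, sizeof(*fltr));
        fltr->type = FILTER_DPDK_1;

        /* Same flag in mask and val: the packet must be UDP. */
        gp->mask_flags = FILTER_GENERIC_1_UDP;
        gp->val_flags  = FILTER_GENERIC_1_UDP;

        memset(&mask, 0, sizeof(mask));
        memset(&val, 0, sizeof(val));
        mask.dst_port = 0xffff;          /* compare all 16 bits of dst port */
        val.dst_port  = dport_be;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask, sizeof(mask));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val,  &val,  sizeof(val));
    }

This mirrors what copy_fltr_v2() in enic_clsf.c does per layer via enic_set_layer().
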
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 2d9104c4..f3fd39f7 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -97,6 +97,7 @@ struct vnic_rq {
struct rte_mbuf *pkt_first_seg;
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
+ uint16_t tot_nb_desc;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 8f12b435..865cd76e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -92,6 +92,11 @@ struct enic_fdir {
struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+ u32 modes;
+ u32 types_mask;
+ void (*copy_fltr_fn)(struct filter_v2 *filt,
+ struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
};
struct enic_soft_stats {
@@ -128,6 +133,7 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
+ u16 adv_filters;
unsigned int flags;
unsigned int priv_flags;
@@ -160,6 +166,7 @@ struct enic {
/* linked list storing memory allocations */
LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
rte_spinlock_t memzone_list_lock;
+ rte_spinlock_t mtu_lock;
};
@@ -169,14 +176,22 @@ static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
return sop_idx / 2;
}
-static inline unsigned int enic_sop_rq(unsigned int rq)
+/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
- return rq * 2;
+ return sop_idx / 2;
+}
+
+/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2;
}
-static inline unsigned int enic_data_rq(unsigned int rq)
+/* Get the Data RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
{
- return rq * 2 + 1;
+ return rte_idx * 2 + 1;
}
static inline unsigned int enic_vnic_rq_count(struct enic *enic)
@@ -276,7 +291,18 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+int enic_link_update(struct enic *enic);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
+ __rte_unused struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
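
The renamed RQ index helpers above encode a fixed layout: each RTE Rx queue i maps to a start-of-packet (SOP) vnic RQ at 2*i, a data RQ at 2*i+1, and completion queue i. A small illustrative check of that mapping (assumes the driver's enic.h is on the include path):

    #include <assert.h>
    #include "enic.h"

    static void example_rq_index_mapping(void)
    {
        /* RTE Rx queue 3 uses vnic RQ 6 (SOP) and RQ 7 (data), CQ 3 */
        assert(enic_rte_rq_idx_to_sop_idx(3)  == 6);
        assert(enic_rte_rq_idx_to_data_idx(3) == 7);
        assert(enic_sop_rq_idx_to_rte_idx(6)  == 3);
        assert(enic_sop_rq_idx_to_cq_idx(6)   == 3);
    }
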
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 111b1942..bcf479ac 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -38,6 +38,11 @@
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
#include "enic_compat.h"
#include "enic.h"
@@ -67,6 +72,262 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
*stats = enic->fdir.stats;
}
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+ info->mode = (enum rte_fdir_mode)enic->fdir.modes;
+ info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+ enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ if (enic->adv_filters) {
+ enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ enic->fdir.copy_fltr_fn = copy_fltr_v2;
+ } else {
+ enic->fdir.copy_fltr_fn = copy_fltr_v1;
+ }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+ enum filter_generic_1_layer layer, void *mask, void *val,
+ unsigned int len)
+{
+ gp->mask_flags |= flag;
+ gp->val_flags |= gp->mask_flags;
+ memcpy(gp->layer[layer].mask, mask, len);
+ memcpy(gp->layer[layer].val, val, len);
+}
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support).
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks)
+{
+ fltr->type = FILTER_IPV4_5TUPLE;
+ fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.src_ip);
+ fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.dst_ip);
+ fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.src_port);
+ fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.dst_port);
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+ fltr->u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr->u.ipv4.protocol = PROTO_UDP;
+
+ fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support).
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks)
+{
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ int i;
+
+ fltr->type = FILTER_DPDK_1;
+ memset(gp, 0, sizeof(*gp));
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp4_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp4_flow.src_port;
+ }
+ if (input->flow.udp4_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp4_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp4_flow.src_port;
+ }
+ if (input->flow.tcp4_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp4_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp4_flow.src_port;
+ }
+ if (input->flow.sctp4_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+ }
+ if (input->flow.sctp4_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+ }
+
+ /* SCTP is IP protocol 132; override ip4_flow.proto */
+ input->flow.ip4_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+ struct ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+ if (input->flow.ip4_flow.tos) {
+ ip4_mask.type_of_service = 0xff;
+ ip4_val.type_of_service = input->flow.ip4_flow.tos;
+ }
+ if (input->flow.ip4_flow.ttl) {
+ ip4_mask.time_to_live = 0xff;
+ ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+ }
+ if (input->flow.ip4_flow.proto) {
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ }
+ if (input->flow.ip4_flow.src_ip) {
+ ip4_mask.src_addr = masks->ipv4_mask.src_ip;
+ ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+ }
+ if (input->flow.ip4_flow.dst_ip) {
+ ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
+ ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+ &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp6_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp6_flow.src_port;
+ }
+ if (input->flow.udp6_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp6_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp6_flow.src_port;
+ }
+ if (input->flow.tcp6_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp6_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp6_flow.src_port;
+ }
+ if (input->flow.sctp6_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+ }
+ if (input->flow.sctp6_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+ }
+
+ /* SCTP is IP protocol 132; override ipv6_flow.proto */
+ input->flow.ipv6_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+ struct ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+ if (input->flow.ipv6_flow.proto) {
+ ipv6_mask.proto = 0xff;
+ ipv6_val.proto = input->flow.ipv6_flow.proto;
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
+ masks->ipv6_mask.src_ip[i];
+ *(uint32_t *)&ipv6_val.src_addr[i * 4] =
+ input->flow.ipv6_flow.src_ip[i];
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
+ masks->ipv6_mask.dst_ip[i];
+ *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
+ input->flow.ipv6_flow.dst_ip[i];
+ }
+ if (input->flow.ipv6_flow.tc) {
+ ipv6_mask.vtc_flow = 0x00ff0000;
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
+ }
+ if (input->flow.ipv6_flow.hop_limits) {
+ ipv6_mask.hop_limits = 0xff;
+ ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+ &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ }
+}
+
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
int32_t pos;
@@ -97,7 +358,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
struct enic_fdir_node *key;
- struct filter fltr = {0};
+ struct filter_v2 fltr;
int32_t pos;
u8 do_free = 0;
u16 old_fltr_id = 0;
@@ -105,9 +366,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u16 flex_bytes;
u16 queue;
- flowtype_supported = (
- (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
- (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+ memset(&fltr, 0, sizeof(fltr));
+ flowtype_supported = enic->fdir.types_mask
+ & (1 << params->input.flow_type);
flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
(params->input.flow_ext.flexbytes[0] & 0xFF));
@@ -121,7 +382,10 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
/* Get the enicpmd RQ from the DPDK Rx queue */
- queue = enic_sop_rq(params->action.rx_queue);
+ queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);
+
+ if (!enic->rq[queue].in_use)
+ return -EINVAL;
/* See if the key is already there in the table */
pos = rte_hash_del_key(enic->fdir.hash, params);
@@ -185,22 +449,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
- fltr.type = FILTER_IPV4_5TUPLE;
- fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.src_ip);
- fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.dst_ip);
- fltr.u.ipv4.src_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.src_port);
- fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.dst_port);
-
- if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
- fltr.u.ipv4.protocol = PROTO_TCP;
- else
- fltr.u.ipv4.protocol = PROTO_UDP;
-
- fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
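
From an application's point of view, these filters are reached through DPDK's flow-director API. A rough usage sketch follows (port id, address, port number and queue are made up for illustration, and fdir_conf.mode must already be set to RTE_FDIR_MODE_PERFECT in the device configuration):

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>

    static int example_add_fdir_udp_rule(uint8_t port_id)
    {
        struct rte_eth_fdir_filter f;

        memset(&f, 0, sizeof(f));
        f.soft_id = 1;
        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        /* fdir input fields are big-endian */
        f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
        f.input.flow.udp4_flow.dst_port  = rte_cpu_to_be_16(4789);
        f.action.behavior = RTE_ETH_FDIR_ACCEPT;
        f.action.rx_queue = 1;            /* steer matches to Rx queue 1 */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                       RTE_ETH_FILTER_ADD, &f);
    }

With advanced filters available the rule goes through copy_fltr_v2() and IPv6/SCTP/OTHER flow types are accepted as well; otherwise only the IPv4 UDP/TCP 5-tuple path of copy_fltr_v1() applies.
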
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 04e7ba8d..2b154ec2 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -95,10 +95,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_INFO:
dev_warning(enic, "unsupported operation %u", filter_op);
ret = -ENOTSUP;
break;
+ case RTE_ETH_FILTER_INFO:
+ enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+ break;
default:
dev_err(enic, "unknown operation %u", filter_op);
ret = -EINVAL;
@@ -152,7 +154,7 @@ static int enicpmd_dev_setup_intr(struct enic *enic)
return 0;
/* check start of packet (SOP) RQs only in case scatter is disabled. */
for (index = 0; index < enic->rq_count; index++) {
- if (!enic->rq[enic_sop_rq(index)].ctrl)
+ if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
break;
}
if (enic->rq_count != index)
@@ -260,6 +262,35 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
enic_free_rq(rxq);
}
+static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint32_t queue_count = 0;
+ struct vnic_cq *cq;
+ uint32_t cq_tail;
+ uint16_t cq_idx;
+ int rq_num;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+ cq_idx = cq->to_clean;
+
+ cq_tail = ioread32(&cq->ctrl->cq_tail);
+
+ if (cq_tail < cq_idx)
+ cq_tail += cq->ring.desc_count;
+
+ queue_count = cq_tail - cq_idx;
+
+ return queue_count;
+}
+
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -282,7 +313,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
}
eth_dev->data->rx_queues[queue_idx] =
- (void *)&enic->rq[enic_sop_rq(queue_idx)];
+ (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
rx_conf->rx_free_thresh);
@@ -400,17 +431,9 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete)
{
struct enic *enic = pmd_priv(eth_dev);
- int ret;
- int link_status = 0;
ENICPMD_FUNC_TRACE();
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ return enic_link_update(enic);
}
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
@@ -460,6 +483,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_L4_TCP,
@@ -564,7 +589,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.tx_queue_stop = enicpmd_dev_tx_queue_stop,
.rx_queue_setup = enicpmd_dev_rx_queue_setup,
.rx_queue_release = enicpmd_dev_rx_queue_release,
- .rx_queue_count = NULL,
+ .rx_queue_count = enicpmd_dev_rx_queue_count,
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
@@ -609,32 +634,14 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_enic_pmd = {
.pci_drv = {
- .name = "rte_enic_pmd",
.id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_enicpmd_dev_init,
.dev_private_size = sizeof(struct enic),
};
-/* Driver initialization routine.
- * Invoked once at EAL init time.
- * Register as the [Poll Mode] Driver of Cisco ENIC device.
- */
-static int
-rte_enic_pmd_init(__rte_unused const char *name,
- __rte_unused const char *params)
-{
- ENICPMD_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_enic_pmd);
- return 0;
-}
-
-static struct rte_driver rte_enic_driver = {
- .type = PMD_PDEV,
- .init = rte_enic_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_enic_driver, enic);
-DRIVER_REGISTER_PCI_TABLE(enic, pci_id_enic_map);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
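
The newly wired rx_queue_count callback backs DPDK's generic descriptor-count query; a trivial usage sketch (port and queue ids are placeholders):

    #include <rte_ethdev.h>

    /* Number of completed-but-unprocessed descriptors on Rx queue 0 */
    static int example_pending_rx(uint8_t port_id)
    {
        return rte_eth_rx_queue_count(port_id, 0);
    }
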
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 7549c12e..f0b15ac1 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -240,14 +240,14 @@ void enic_init_vnic_resources(struct enic *enic)
struct vnic_rq *data_rq;
for (index = 0; index < enic->rq_count; index++) {
- cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+ cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
- vnic_rq_init(&enic->rq[enic_sop_rq(index)],
+ vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
cq_idx,
error_interrupt_enable,
error_interrupt_offset);
- data_rq = &enic->rq[enic_data_rq(index)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
if (data_rq->in_use)
vnic_rq_init(data_rq,
cq_idx,
@@ -410,14 +410,32 @@ enic_free_consistent(void *priv,
rte_free(mze);
}
+int enic_link_update(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int ret;
+ int link_status = 0;
+
+ link_status = enic_get_link_status(enic);
+ ret = (link_status == enic->link_status);
+ enic->link_status = link_status;
+ eth_dev->data->dev_link.link_status = link_status;
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ return ret;
+}
+
static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
void *arg)
{
- struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+ struct enic *enic = pmd_priv(dev);
vnic_intr_return_all_credits(&enic->intr);
+ enic_link_update(enic);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
@@ -429,7 +447,13 @@ int enic_enable(struct enic *enic)
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+ /* vnic notification of link status has already been turned on in
+ * enic_dev_init() which is called during probe time. Here we are
+ * just turning on interrupt vector 0 if needed.
+ */
+ if (eth_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, 0);
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
@@ -437,17 +461,17 @@ int enic_enable(struct enic *enic)
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
if (err) {
dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
return err;
}
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_data_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
if (err) {
/* release the allocated mbufs for the sop rq*/
enic_rxmbuf_queue_release(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
dev_err(enic, "Failed to alloc data RX queue mbufs\n");
return err;
@@ -517,6 +541,9 @@ void enic_free_rq(void *rxq)
vnic_rq_free(rq_data);
vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
+
+ rq_sop->in_use = 0;
+ rq_data->in_use = 0;
}
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -541,8 +568,10 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
if (rq_data->in_use)
@@ -556,8 +585,10 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
int ret1 = 0, ret2 = 0;
struct rte_eth_dev *eth_dev = enic->rte_dev;
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
ret2 = vnic_rq_disable(rq_sop);
rte_mb();
@@ -578,13 +609,14 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
uint16_t nb_desc, uint16_t free_thresh)
{
int rc;
- uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
- uint16_t data_queue_idx = enic_data_rq(queue_idx);
+ uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+ uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
+ uint16_t mtu = enic->rte_dev->data->mtu;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -604,9 +636,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
RTE_PKTMBUF_HEADROOM);
if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
- dev_info(enic, "Scatter rx mode enabled\n");
+ dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+ mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
(mbuf_size - 1)) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
@@ -657,7 +689,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- enic->config.mtu, mbuf_size, min_sop + min_data,
+ mtu, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -708,6 +740,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
+
return 0;
err_free_sop_mbuf:
@@ -804,6 +838,10 @@ int enic_disable(struct enic *enic)
vnic_intr_mask(&enic->intr);
(void)vnic_intr_masked(&enic->intr); /* flush write */
+ rte_intr_disable(&enic->pdev->intr_handle);
+ rte_intr_callback_unregister(&enic->pdev->intr_handle,
+ enic_intr_handler,
+ (void *)enic->rte_dev);
vnic_dev_disable(enic->vdev);
@@ -825,8 +863,14 @@ int enic_disable(struct enic *enic)
}
}
+ /* If we were using interrupts, set the interrupt vector to -1
+ * to disable interrupts. We are not disabling link notifications,
+ * though, as we want the polling of link status to continue working.
+ */
+ if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, -1);
+
vnic_dev_set_reset_flag(enic->vdev, 1);
- vnic_dev_notify_unset(enic->vdev);
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
@@ -928,7 +972,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
for (i = 0; i < (1 << rss_hash_bits); i++)
(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_sop_rq(i % enic->rq_count);
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -1028,6 +1072,9 @@ static void enic_dev_deinit(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ /* stop link status checking */
+ vnic_dev_notify_unset(enic->vdev);
+
rte_free(eth_dev->data->mac_addrs);
}
@@ -1069,6 +1116,56 @@ int enic_set_vnic_res(struct enic *enic)
return rc;
}
+/* Initialize the completion queue for an RQ */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+ struct vnic_rq *sop_rq, *data_rq;
+ unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ int rc = 0;
+
+ sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+
+ vnic_cq_clean(&enic->cq[cq_idx]);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
+
+
+ vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
+ enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
+ sop_rq->ring.desc_count - 1, 1, 0);
+ if (data_rq->in_use) {
+ vnic_rq_init_start(data_rq,
+ enic_cq_rq(enic,
+ enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
+ data_rq->ring.desc_count - 1, 1, 0);
+ }
+
+ rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+ if (rc)
+ return rc;
+
+ if (data_rq->in_use) {
+ rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+ if (rc) {
+ enic_rxmbuf_queue_release(enic, sop_rq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/* The Cisco NIC can send and receive packets up to a max packet size
* determined by the NIC type and firmware. There is also an MTU
* configured into the NIC via the CIMC/UCSM management interface
@@ -1078,6 +1175,9 @@ int enic_set_vnic_res(struct enic *enic)
*/
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
+ unsigned int rq_idx;
+ struct vnic_rq *rq;
+ int rc = 0;
uint16_t old_mtu; /* previous setting */
uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
struct rte_eth_dev *eth_dev = enic->rte_dev;
@@ -1085,10 +1185,6 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
- /* only works with Rx scatter disabled */
- if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
- return -ENOTSUP;
-
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1106,11 +1202,83 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
+ /* The easy case is when scatter is disabled. However if the MTU
+ * becomes greater than the mbuf data size, packet drops will ensue.
+ */
+ if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ eth_dev->data->mtu = new_mtu;
+ goto set_mtu_done;
+ }
+
+ /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
+ * change Rx scatter mode if necessary for better performance. I.e. if
+ * MTU was greater than the mbuf size and now it's less, scatter Rx
+ * doesn't have to be used and vice versa.
+ */
+ rte_spinlock_lock(&enic->mtu_lock);
+
+ /* Stop traffic on all RQs */
+ for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+ rq = &enic->rq[rq_idx];
+ if (rq->is_sop && rq->in_use) {
+ rc = enic_stop_rq(enic,
+ enic_sop_rq_idx_to_rte_idx(rq_idx));
+ if (rc) {
+ dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+ goto set_mtu_done;
+ }
+ }
+ }
+
+ /* replace the Rx function with a no-op to avoid getting stale pkts */
+ eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+ rte_mb();
+
+ /* Allow time for threads to exit the real Rx function. */
+ usleep(100000);
+
+ /* now it is safe to reconfigure the RQs */
+
/* update the mtu */
eth_dev->data->mtu = new_mtu;
+ /* free and reallocate RQs with the new MTU */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+
+ enic_free_rq(rq);
+ rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+ rq->tot_nb_desc, rq->rx_free_thresh);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU alloc error- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+
+ rc = enic_reinit_rq(enic, rq_idx);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU RQ reinit- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+ }
+
+ /* put back the real receive function */
+ rte_mb();
+ eth_dev->rx_pkt_burst = enic_recv_pkts;
+ rte_mb();
+
+ /* restart Rx traffic */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (rq->is_sop && rq->in_use)
+ enic_start_rq(enic, rq_idx);
+ }
+
+set_mtu_done:
dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
- return 0;
+ rte_spinlock_unlock(&enic->mtu_lock);
+ return rc;
}
static int enic_dev_init(struct enic *enic)
@@ -1137,6 +1305,9 @@ static int enic_dev_init(struct enic *enic)
return -EINVAL;
}
+ /* Get the supported filters */
+ enic_fdir_info(enic);
+
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
@@ -1147,6 +1318,9 @@ static int enic_dev_init(struct enic *enic)
vnic_dev_set_reset_flag(enic->vdev, 0);
+ /* set up link status checking */
+ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
return 0;
}
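
With the rework above, an MTU change no longer bails out when Rx scatter is enabled: the PMD quiesces the RQs, swaps in the dummy Rx burst function, reallocates the queues with the new MTU, and restarts them. From the application side it remains the standard call (port id and MTU value are placeholders):

    #include <rte_ethdev.h>

    static int example_set_jumbo_mtu(uint8_t port_id)
    {
        /* Ends up in enic_set_mtu(); RQs are rebuilt if scatter is enabled */
        return rte_eth_dev_set_mtu(port_id, 9000);
    }
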
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 84c5d336..8a230a16 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -62,6 +62,7 @@ int enic_get_vnic_config(struct enic *enic)
return err;
}
+
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
@@ -98,6 +99,10 @@ int enic_get_vnic_config(struct enic *enic)
enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
max_t(u16, ENIC_MIN_MTU, c->mtu));
+ enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+ dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+ ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index ad596136..f762a26c 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -149,30 +149,18 @@ enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
uint8_t cqrd_flags = cqrd->flags;
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
- [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
+ [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
@@ -185,9 +173,10 @@ static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t ciflags, bwflags, pkt_flags = 0;
+ uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
ciflags = enic_cq_rx_desc_ciflags(cqrd);
bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ vlan_tci = enic_cq_rx_desc_vlan(cqrd);
mbuf->ol_flags = 0;
@@ -195,13 +184,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
goto mbuf_flags_done;
- /* VLAN stripping */
+ /* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
- mbuf->vlan_tci = 0;
+ if (vlan_tci != 0)
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
}
+ mbuf->vlan_tci = vlan_tci;
/* RSS flag */
if (enic_cq_rx_desc_rss_type(cqrd)) {
@@ -227,6 +220,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
mbuf->ol_flags = pkt_flags;
}
+/* dummy receive function to replace actual function in
+ * order to do safe reconfiguration operations.
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
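
Since the Rx path now reports the L2 packet type and always fills vlan_tci, an application can distinguish stripped from in-packet VLAN tags roughly as follows (illustrative loop, not taken from the patch):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void example_inspect_vlan(uint8_t port_id)
    {
        struct rte_mbuf *pkts[32];
        uint16_t i, n = rte_eth_rx_burst(port_id, 0, pkts, 32);
        unsigned int stripped = 0, tagged = 0;

        for (i = 0; i < n; i++) {
            struct rte_mbuf *m = pkts[i];

            if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
                stripped++;   /* tag removed by the NIC; value in m->vlan_tci */
            else if ((m->packet_type & RTE_PTYPE_L2_MASK) ==
                     RTE_PTYPE_L2_ETHER_VLAN)
                tagged++;     /* tag still in the frame; vlan_tci mirrors it */
            rte_pktmbuf_free(m);
        }
        (void)stripped;
        (void)tagged;
    }
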