Diffstat (limited to 'drivers/net/enic')
-rw-r--r--   drivers/net/enic/base/cq_desc.h       |    1
-rw-r--r--   drivers/net/enic/base/vnic_dev.c      |   88
-rw-r--r--   drivers/net/enic/base/vnic_dev.h      |   12
-rw-r--r--   drivers/net/enic/base/vnic_devcmd.h   |   38
-rw-r--r--   drivers/net/enic/base/vnic_enet.h     |    3
-rw-r--r--   drivers/net/enic/base/vnic_nic.h      |    6
-rw-r--r--   drivers/net/enic/base/vnic_rq.h       |    4
-rw-r--r--   drivers/net/enic/base/vnic_wq.c       |    9
-rw-r--r--   drivers/net/enic/base/vnic_wq.h       |   13
-rw-r--r--   drivers/net/enic/enic.h               |  142
-rw-r--r--   drivers/net/enic/enic_clsf.c          |   21
-rw-r--r--   drivers/net/enic/enic_compat.h        |    5
-rw-r--r--   drivers/net/enic/enic_ethdev.c        |  483
-rw-r--r--   drivers/net/enic/enic_flow.c          |   87
-rw-r--r--   drivers/net/enic/enic_main.c          |  641
-rw-r--r--   drivers/net/enic/enic_res.c           |   94
-rw-r--r--   drivers/net/enic/enic_res.h           |   22
-rw-r--r--   drivers/net/enic/enic_rxtx.c          |  397
-rw-r--r--   drivers/net/enic/meson.build          |   19
19 files changed, 1728 insertions, 357 deletions
diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index 7e138027..ae8847c6 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -38,6 +38,7 @@ struct cq_desc {
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 05b595eb..16e8814a 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -10,6 +10,7 @@
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
+#include "vnic_nic.h"
#include "vnic_stats.h"
@@ -484,7 +485,7 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
* Return true in filter_tags if supported
*/
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags)
+ u8 *filter_actions)
{
u64 args[4];
int err;
@@ -492,14 +493,10 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
err = vnic_dev_advanced_filters_cap(vdev, args, 4);
- /* determine if filter tags are available */
- if (err)
- *filter_tags = 0;
- if ((args[2] == FILTER_CAP_MODE_V1) &&
- (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
- *filter_tags = 1;
- else
- *filter_tags = 0;
+ /* determine supported filter actions */
+ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
+ if (args[2] == FILTER_CAP_MODE_V1)
+ *filter_actions = args[3];
if (err || ((args[0] == 1) && (args[1] == 0))) {
/* Adv filter Command not supported or adv filters available but
@@ -531,6 +528,22 @@ parse_max_level:
return 0;
}
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak)
+{
+ u64 a0 = CMD_NIC_CFG, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *cfg_chk = false;
+ *weak = false;
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err == 0 && a0 != 0 && a1 != 0) {
+ *cfg_chk = true;
+ *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
+ }
+}
+
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -587,17 +600,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
- static u32 instance;
- char name[NAME_MAX];
- if (!vdev->stats) {
- snprintf((char *)name, sizeof(name),
- "vnic_stats-%u", instance++);
- vdev->stats = vdev->alloc_consistent(vdev->priv,
- sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
- if (!vdev->stats)
- return -ENOMEM;
- }
+ if (!vdev->stats)
+ return -ENOMEM;
*stats = vdev->stats;
a0 = vdev->stats_pa;
@@ -922,6 +927,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
return vdev->intr_coal_timer_info.max_usec;
}
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
+{
+ char name[NAME_MAX];
+ static u32 instance;
+
+ snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
+ vdev->stats = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, (u8 *)name);
+ return vdev->stats == NULL ? -ENOMEM : 0;
+}
+
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
@@ -1044,3 +1061,36 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
return ret;
}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+ u64 a0 = VIC_FEATURE_VXLAN;
+ u64 a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+ return ret == 0 &&
+ (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+ (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
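
For illustration, a minimal sketch (not part of the patch) of how a probe path might consume the two new capability helpers above. The wrapper function name is an assumption; the enic fields it writes are the ones added in this series.

	/* Sketch only: probe-time capability discovery using the new helpers. */
	static void example_query_caps(struct vnic_dev *vdev, struct enic *enic)
	{
		bool cfg_chk, weak;

		/* NIC_CFG_CHK availability and Bodega-style (weak) UDP RSS */
		vnic_dev_capable_udp_rss_weak(vdev, &cfg_chk, &weak);
		enic->nic_cfg_chk = cfg_chk;
		enic->udp_rss_weak = weak;

		/* VXLAN offload: needs IPv6 inner/outer and multi-WQ support */
		enic->vxlan = vnic_dev_capable_vxlan(vdev) != 0;
	}
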
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 8c099206..270a47bd 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -6,6 +6,8 @@
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
+#include <stdbool.h>
+
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -108,7 +110,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags);
+ u8 *filter_actions);
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -165,6 +169,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
@@ -177,10 +182,9 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2);
-#ifdef ENIC_VXLAN
-int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
u8 overlay, u8 config);
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
-#endif
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 6b95bc48..a22d8a76 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -138,9 +138,27 @@ enum vnic_devcmd_cmd {
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
- /* nic_cfg in (u32)a0 */
+ /*
+ * nic_cfg in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+ /*
+ * nic_cfg_chk (same as nic_cfg, but may return error)
+ * in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
@@ -600,10 +618,14 @@ enum filter_cap_mode {
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+/* flags for CMD_NIC_CFG */
+#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */
+
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
@@ -840,7 +862,9 @@ struct filter_action {
#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_DROP_FLAG (1 << 2)
#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_DROP_FLAG \
| FILTER_ACTION_FILTER_ID_FLAG)
/* Version 2 of filter action must be a strict extension of struct filter_action
@@ -1078,6 +1102,18 @@ typedef enum {
} vic_feature_t;
/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate the host driver about the VxLAN and Multi WQ features
+ * supported
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
+/*
* CMD_CONFIG_GRPINTR subcommands
*/
typedef enum {
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index 26918335..901f3b46 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -52,6 +52,9 @@ struct vnic_enet_config {
#define VENETF_VXLAN 0x10000 /* VxLAN offload */
#define VENETF_NVGRE 0x20000 /* NVGRE offload */
#define VENETF_GRPINTR 0x40000 /* group interrupt */
+#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
+#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h
index a753b3a5..16040852 100644
--- a/drivers/net/enic/base/vnic_nic.h
+++ b/drivers/net/enic/base/vnic_nic.h
@@ -27,12 +27,14 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
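
For illustration, a hedged sketch (not part of the patch) of how the DPDK ETH_RSS_* flags could map onto the NIC_CFG_RSS_HASH_TYPE bits defined above, including the new UDP bits; the helper name is illustrative only.

	/* Sketch only: translate ETH_RSS_* flags to NIC_CFG_RSS_HASH_TYPE bits. */
	static uint8_t example_rss_hf_to_hash_type(uint64_t rss_hf)
	{
		uint8_t hash_type = 0;

		if (rss_hf & ETH_RSS_IPV4)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
		if (rss_hf & ETH_RSS_IPV6)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
			hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
		return hash_type;
	}
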
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index d774bb0d..d8e67f74 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -6,6 +6,7 @@
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
+#include <stdbool.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
@@ -51,6 +52,8 @@ struct vnic_rq {
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
+ struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+ int num_free_mbufs;
struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
unsigned int mbuf_next_idx; /* next mb to consume */
void *os_buf_head;
@@ -69,6 +72,7 @@ struct vnic_rq {
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
uint16_t tot_nb_desc;
+ bool need_initial_post;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index d61c4c6e..c9bf3572 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -32,8 +32,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
unsigned int count = wq->ring.desc_count;
/* Allocate the mbuf ring */
- wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
- sizeof(struct vnic_wq_buf) * count,
+ wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",
+ sizeof(struct rte_mbuf *) * count,
RTE_CACHE_LINE_SIZE, wq->socket_id);
wq->head_idx = 0;
wq->tail_idx = 0;
@@ -113,6 +113,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
+ wq->cq_pend = 0;
wq->last_completed_index = 0;
}
@@ -145,9 +146,9 @@ int vnic_wq_disable(struct vnic_wq *wq)
}
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf))
+ void (*buf_clean)(struct rte_mbuf **buf))
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf **buf;
unsigned int to_clean = wq->tail_idx;
buf = &wq->bufs[to_clean];
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 7c069c06..236cf696 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -36,23 +36,20 @@ struct vnic_wq_ctrl {
u32 pad9;
};
-/* 16 bytes */
-struct vnic_wq_buf {
- struct rte_mempool *pool;
- void *mb;
-};
-
struct vnic_wq {
unsigned int index;
+ uint64_t tx_offload_notsup_mask;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
- struct vnic_wq_buf *bufs;
+ struct rte_mbuf **bufs;
unsigned int head_idx;
+ unsigned int cq_pend;
unsigned int tail_idx;
unsigned int socket_id;
const struct rte_memzone *cqmsg_rz;
uint16_t last_completed_index;
+ uint64_t offloads;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -163,5 +160,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf));
+ void (*buf_clean)(struct rte_mbuf **buf));
#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c083985e..7c27bd51 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
+#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>
@@ -49,6 +50,18 @@
#define ENICPMD_FDIR_MAX 64
+/* HW default VXLAN port */
+#define ENIC_DEFAULT_VXLAN_PORT 4789
+
+/*
+ * Interrupt 0: LSC and errors
+ * Interrupt 1: rx queue 0
+ * Interrupt 2: rx queue 1
+ * ...
+ */
+#define ENICPMD_LSC_INTR_OFFSET 0
+#define ENICPMD_RXQ_INTR_OFFSET 1
+
struct enic_fdir_node {
struct rte_eth_fdir_filter filter;
u16 fltr_id;
@@ -92,6 +105,7 @@ struct enic {
struct vnic_dev *vdev;
unsigned int port_id;
+ bool overlay_offload;
struct rte_eth_dev *rte_dev;
struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
@@ -109,7 +123,13 @@ struct enic {
u16 max_mtu;
u8 adv_filters;
u32 flow_filter_mode;
- u8 filter_tags;
+ u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable_overlay=1 */
+ bool nic_cfg_chk; /* NIC_CFG_CHK available */
+ bool udp_rss_weak; /* Bodega style UDP RSS */
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
+ uint16_t vxlan_port; /* current vxlan port pushed to NIC */
unsigned int flags;
unsigned int priv_flags;
@@ -126,9 +146,9 @@ struct enic {
struct vnic_cq *cq;
unsigned int cq_count; /* equals rq_count + wq_count */
- /* interrupt resource */
- struct vnic_intr intr;
- unsigned int intr_count;
+ /* interrupt vectors (len = conf_intr_count) */
+ struct vnic_intr *intr;
+ unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
/* software counters */
struct enic_soft_stats soft_stats;
@@ -146,8 +166,34 @@ struct enic {
LIST_HEAD(enic_flows, rte_flow) flows;
rte_spinlock_t flows_lock;
+
+ /* RSS */
+ uint16_t reta_size;
+ uint8_t hash_key_size;
+ uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
+ /*
+ * Keep a copy of current RSS config for queries, as we cannot retrieve
+ * it from the NIC.
+ */
+ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
+ uint8_t rss_enable;
+ uint64_t rss_hf; /* ETH_RSS flags */
+ union vnic_rss_key rss_key;
+ union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
+/* Compute ethdev's max packet size from MTU */
+static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
+{
+ /* ethdev max size includes eth and crc whereas NIC MTU does not */
+ return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+}
+
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
@@ -220,53 +266,61 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
return idx;
}
-extern void enic_fdir_stats_get(struct enic *enic,
- struct rte_eth_fdir_stats *stats);
-extern int enic_fdir_add_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern int enic_fdir_del_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern void enic_free_wq(void *txq);
-extern int enic_alloc_intr_resources(struct enic *enic);
-extern int enic_setup_finish(struct enic *enic);
-extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, uint16_t nb_desc);
-extern void enic_start_wq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
-extern void enic_start_rq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
-extern void enic_free_rq(void *rxq);
-extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc, uint16_t free_thresh);
-extern int enic_set_rss_nic_cfg(struct enic *enic);
-extern int enic_set_vnic_res(struct enic *enic);
-extern int enic_enable(struct enic *enic);
-extern int enic_disable(struct enic *enic);
-extern void enic_remove(struct enic *enic);
-extern int enic_get_link_status(struct enic *enic);
-extern int enic_dev_stats_get(struct enic *enic,
- struct rte_eth_stats *r_stats);
-extern void enic_dev_stats_clear(struct enic *enic);
-extern void enic_add_packet_filter(struct enic *enic);
+void enic_fdir_stats_get(struct enic *enic,
+ struct rte_eth_fdir_stats *stats);
+int enic_fdir_add_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+int enic_fdir_del_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+void enic_free_wq(void *txq);
+int enic_alloc_intr_resources(struct enic *enic);
+int enic_setup_finish(struct enic *enic);
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+void enic_free_rq(void *rxq);
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc, uint16_t free_thresh);
+int enic_set_vnic_res(struct enic *enic);
+int enic_init_rss_nic_cfg(struct enic *enic);
+int enic_set_rss_conf(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf);
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
+int enic_set_vlan_strip(struct enic *enic);
+int enic_enable(struct enic *enic);
+int enic_disable(struct enic *enic);
+void enic_remove(struct enic *enic);
+int enic_get_link_status(struct enic *enic);
+int enic_dev_stats_get(struct enic *enic,
+ struct rte_eth_stats *r_stats);
+void enic_dev_stats_clear(struct enic *enic);
+void enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-void enic_del_mac_address(struct enic *enic, int mac_index);
-extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
-extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop, uint8_t cq_entry,
- uint16_t ol_flags, uint16_t vlan_tag);
-
-extern void enic_post_wq_index(struct vnic_wq *wq);
-extern int enic_probe(struct enic *enic);
-extern int enic_clsf_init(struct enic *enic);
-extern void enic_clsf_destroy(struct enic *enic);
+int enic_del_mac_address(struct enic *enic, int mac_index);
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag);
+
+void enic_post_wq_index(struct vnic_wq *wq);
+int enic_probe(struct enic *enic);
+int enic_clsf_init(struct enic *enic);
+void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
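
As a quick worked example for the new enic_mtu_to_max_rx_pktlen() helper above: with the standard 1500-byte MTU it reports 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes, i.e. the usual maximum frame size for untagged traffic.
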
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 3ef1d083..9d95201e 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -111,7 +111,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
- int i;
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
@@ -273,18 +272,14 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
}
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.src_addr[i * 4] =
- input->flow.ipv6_flow.src_ip[i];
- }
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
- input->flow.ipv6_flow.dst_ip[i];
- }
+ memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+ sizeof(ipv6_mask.src_addr));
+ memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+ sizeof(ipv6_val.src_addr));
+ memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+ sizeof(ipv6_mask.dst_addr));
+ memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+ sizeof(ipv6_val.dst_addr));
if (input->flow.ipv6_flow.tc) {
ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index c0af1ed2..ceb1b096 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -56,6 +56,11 @@
#define dev_debug(x, args...) dev_printk(DEBUG, args)
extern int enicpmd_logtype_flow;
+extern int enicpmd_logtype_init;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+ "%s" fmt "\n", __func__, ##args)
#define __le16 u16
#define __le32 u32
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index d84714ef..b3d57771 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -11,6 +11,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -23,10 +24,6 @@
int enicpmd_logtype_init;
int enicpmd_logtype_flow;
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
- "%s" fmt "\n", __func__, ##args)
-
#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
/*
@@ -39,9 +36,10 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
};
-RTE_INIT(enicpmd_init_log);
-static void
-enicpmd_init_log(void)
+#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+
+RTE_INIT(enicpmd_init_log)
{
enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
if (enicpmd_logtype_init >= 0)
@@ -181,17 +179,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
RTE_ASSERT(queue_idx < enic->conf_wq_count);
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+ wq = &enic->wq[queue_idx];
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
if (ret) {
@@ -318,52 +320,40 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
return enicpmd_dev_setup_intr(enic);
}
-static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on)
-{
- struct enic *enic = pmd_priv(eth_dev);
- int err;
-
- ENICPMD_FUNC_TRACE();
- if (on)
- err = enic_add_vlan(enic, vlan_id);
- else
- err = enic_del_vlan(enic, vlan_id);
- return err;
-}
-
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct enic *enic = pmd_priv(eth_dev);
+ uint64_t offloads;
ENICPMD_FUNC_TRACE();
+ offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
}
- enic_set_rss_nic_cfg(enic);
-
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if ((mask & ETH_VLAN_FILTER_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
dev_warning(enic,
"Configuration of VLAN filter is not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
+ if ((mask & ETH_VLAN_EXTEND_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
dev_warning(enic,
"Configuration of extended VLAN is not supported\n");
}
- return 0;
+ return enic_set_vlan_strip(enic);
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
int ret;
+ int mask;
struct enic *enic = pmd_priv(eth_dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -378,9 +368,21 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
- ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
-
- return ret;
+ /* All vlan offload masks to apply the current settings */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = enicpmd_vlan_offload_set(eth_dev, mask);
+ if (ret) {
+ dev_err(enic, "Failed to configure VLAN offloads\n");
+ return ret;
+ }
+ /*
+ * Initialize RSS with the default reta and key. If the user key is
+ * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
+ * default key.
+ */
+ return enic_init_rss_nic_cfg(enic);
}
/* Start the device.
@@ -410,10 +412,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
ENICPMD_FUNC_TRACE();
enic_disable(enic);
+
memset(&link, 0, sizeof(link));
- rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&link);
+ rte_eth_linkstatus_set(eth_dev, &link);
}
/*
@@ -459,27 +460,52 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
- device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
+ /* "Max" mtu is not a typo. HW receives packet sizes up to the
+ * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
+ * a hint to the driver to size receive buffers accordingly so that
+ * larger-than-vnic-mtu packets get truncated. For DPDK, we let
+ * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+ * ignoring vNIC mtu.
+ */
+ device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
- device_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- device_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->rx_offload_capa = enic->rx_offload_capa;
+ device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
+ device_info->reta_size = enic->reta_size;
+ device_info->hash_key_size = enic->hash_key_size;
+ device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
+ device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.rq_desc_count,
+ .nb_min = ENIC_MIN_RQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ };
+ device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.wq_desc_count,
+ .nb_min = ENIC_MIN_WQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ .nb_seg_max = ENIC_TX_XMIT_MAX,
+ .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
+ };
+ device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_RX_BURST,
+ .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
+ ENIC_DEFAULT_RX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_RX_RINGS,
+ };
+ device_info->default_txportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_TX_BURST,
+ .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
+ ENIC_DEFAULT_TX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_TX_RINGS,
+ };
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
@@ -496,7 +522,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == enic_recv_pkts)
+ if (dev->rx_pkt_burst == enic_recv_pkts ||
+ dev->rx_pkt_burst == enic_noscatter_recv_pkts)
return ptypes;
return NULL;
}
@@ -571,7 +598,24 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
return;
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic, index);
+ if (enic_del_mac_address(enic, index))
+ dev_err(enic, "del mac addr failed\n");
+}
+
+static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *addr)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_del_mac_address(enic, 0);
+ if (ret)
+ return ret;
+ return enic_set_mac_address(enic, addr->addr_bytes);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -582,6 +626,242 @@ static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return enic_set_mtu(enic, mtu);
}
+static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
+ enic->rss_cpu.cpu[i / 4].b[i % 4]);
+ }
+
+ return 0;
+}
+
+static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ union vnic_rss_cpu rss_cpu;
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_update: wrong reta_size. given=%u"
+ " expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+ /*
+ * Start with the current reta and modify it per reta_conf, as we
+ * need to push the entire reta even if we only modify one entry.
+ */
+ rss_cpu = enic->rss_cpu;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(
+ reta_conf[idx].reta[shift]);
+ }
+ return enic_set_rss_reta(enic, &rss_cpu);
+}
+
+static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_rss_conf(enic, rss_conf);
+}
+
+static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ if (rss_conf == NULL)
+ return -EINVAL;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
+ " expected=%u+\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ rss_conf->rss_hf = enic->rss_hf;
+ if (rss_conf->rss_key != NULL) {
+ int i;
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
+ rss_conf->rss_key[i] =
+ enic->rss_key.key[i / 10].b[i % 10];
+ }
+ rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ return 0;
+}
+
+static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ struct rte_eth_rxconf *conf;
+ uint16_t sop_queue_idx;
+ uint16_t data_queue_idx;
+
+ ENICPMD_FUNC_TRACE();
+ sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ rq_sop = &enic->rq[sop_queue_idx];
+ rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
+ qinfo->mp = rq_sop->mp;
+ qinfo->scattered_rx = rq_sop->data_queue_enable;
+ qinfo->nb_desc = rq_sop->ring.desc_count;
+ if (qinfo->scattered_rx)
+ qinfo->nb_desc += rq_data->ring.desc_count;
+ conf = &qinfo->conf;
+ memset(conf, 0, sizeof(*conf));
+ conf->rx_free_thresh = rq_sop->rx_free_thresh;
+ conf->rx_drop_en = 1;
+ /*
+ * Except VLAN stripping (port setting), all the checksum offloads
+ * are always enabled.
+ */
+ conf->offloads = enic->rx_offload_capa;
+ if (!enic->ig_vlan_strip_en)
+ conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* rx_thresh and other fields are not applicable for enic */
+}
+
+static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_wq *wq = &enic->wq[tx_queue_id];
+
+ ENICPMD_FUNC_TRACE();
+ qinfo->nb_desc = wq->ring.desc_count;
+ memset(&qinfo->conf, 0, sizeof(qinfo->conf));
+ qinfo->conf.offloads = wq->offloads;
+ /* tx_thresh, and all the other fields are not applicable for enic */
+}
+
+static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int udp_tunnel_common_check(struct enic *enic,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+ return -ENOTSUP;
+ if (!enic->overlay_offload) {
+ PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
+ "supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int update_vxlan_port(struct enic *enic, uint16_t port)
+{
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ port)) {
+ PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
+ enic->vxlan_port = port;
+ return 0;
+}
+
+static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * The NIC has 1 configurable VXLAN port number. "Adding" a new port
+ * number replaces it.
+ */
+ if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+ PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, tnl->udp_port);
+}
+
+static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * Clear the previously set port number and restore the
+ * hardware default port number. Some drivers disable VXLAN
+ * offloads when there are no configured port numbers. But
+ * enic does not do that as VXLAN is part of overlay offload,
+ * which is tied to inner RSS and TSO.
+ */
+ if (tnl->udp_port != enic->vxlan_port) {
+ PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+}
+
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
@@ -600,7 +880,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_infos_get = enicpmd_dev_info_get,
.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
.mtu_set = enicpmd_mtu_set,
- .vlan_filter_set = enicpmd_vlan_filter_set,
+ .vlan_filter_set = NULL,
.vlan_tpid_set = NULL,
.vlan_offload_set = enicpmd_vlan_offload_set,
.vlan_strip_queue_set = NULL,
@@ -614,6 +894,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
+ .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
+ .rxq_info_get = enicpmd_dev_rxq_info_get,
+ .txq_info_get = enicpmd_dev_txq_info_get,
.dev_led_on = NULL,
.dev_led_off = NULL,
.flow_ctrl_get = NULL,
@@ -621,9 +905,97 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.priority_flow_ctrl_set = NULL,
.mac_addr_add = enicpmd_add_mac_addr,
.mac_addr_remove = enicpmd_remove_mac_addr,
+ .mac_addr_set = enicpmd_set_mac_addr,
.filter_ctrl = enicpmd_dev_filter_ctrl,
+ .reta_query = enicpmd_dev_rss_reta_query,
+ .reta_update = enicpmd_dev_rss_reta_update,
+ .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
+ .rss_hash_update = enicpmd_dev_rss_hash_update,
+ .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};
+static int enic_parse_disable_overlay(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "0") == 0) {
+ enic->disable_overlay = false;
+ } else if (strcmp(value, "1") == 0) {
+ enic->disable_overlay = true;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
+ ": expected=0|1 given=%s\n", value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "trunk") == 0) {
+ /* Trunk mode: always tag */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+ } else if (strcmp(value, "untag") == 0) {
+ /* Untag default VLAN mode: untag if VLAN = default VLAN */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "priority") == 0) {
+ /*
+ * Priority-tag default VLAN mode: priority tag (VLAN header
+ * with ID=0) if VLAN = default
+ */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "pass") == 0) {
+ /* Pass through mode: do not touch tags */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+ ": expected=trunk|untag|priority|pass given=%s\n",
+ value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_IG_VLAN_REWRITE,
+ NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->disable_overlay = false;
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+ enic_parse_disable_overlay, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+ enic_parse_ig_vlan_rewrite, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
@@ -633,6 +1005,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
@@ -651,6 +1024,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
addr->domain, addr->bus, addr->devid, addr->function);
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
return enic_probe(enic);
}
@@ -676,3 +1052,6 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
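
For reference, the two devargs registered above can be passed at probe time, e.g. (PCI address is a placeholder):

	testpmd -w 0000:af:00.0,disable-overlay=1,ig-vlan-rewrite=untag -- -i

Unrecognized values are rejected by the parse callbacks, so eth_enicpmd_dev_init() fails with -EINVAL instead of silently ignoring them.
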
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 28923b0e..0cf04aef 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -3,6 +3,7 @@
*/
#include <errno.h>
+#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
@@ -273,21 +274,33 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = {
};
/** Supported actions for newer NICs */
-static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
RTE_FLOW_ACTION_TYPE_END,
};
+static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
[FILTER_ACTION_RQ_STEERING_FLAG] = {
.actions = enic_supported_actions_v1,
.copy_fn = enic_copy_action_v1,
},
- [FILTER_ACTION_V2_ALL] = {
- .actions = enic_supported_actions_v2,
+ [FILTER_ACTION_FILTER_ID_FLAG] = {
+ .actions = enic_supported_actions_v2_id,
+ .copy_fn = enic_copy_action_v2,
+ },
+ [FILTER_ACTION_DROP_FLAG] = {
+ .actions = enic_supported_actions_v2_drop,
.copy_fn = enic_copy_action_v2,
},
};
@@ -544,16 +557,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
if (!spec)
return 0;
- /* Don't support filtering in tpid */
- if (mask) {
- if (mask->tpid != 0)
- return ENOTSUP;
- } else {
+ if (!mask)
mask = &rte_flow_item_vlan_mask;
- RTE_ASSERT(mask->tpid == 0);
- }
if (*inner_ofst == 0) {
+ struct ether_hdr *eth_mask =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ struct ether_hdr *eth_val =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ eth_mask->ether_type = mask->inner_type;
+ eth_val->ether_type = spec->inner_type;
+
/* Outer header. Use the vlan mask/val fields */
gp->mask_vlan = mask->tci;
gp->val_vlan = spec->tci;
@@ -952,6 +970,9 @@ static int
enic_copy_action_v1(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, };
+ uint32_t overlap = 0;
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -963,6 +984,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
break;
@@ -972,6 +997,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_RQ_STEERING;
return 0;
}
@@ -989,6 +1016,9 @@ static int
enic_copy_action_v2(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, MARK = 2, };
+ uint32_t overlap = 0;
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -997,6 +1027,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
@@ -1007,6 +1041,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
(const struct rte_flow_action_mark *)
actions->conf;
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
/* ENIC_MAGIC_FILTER_ID is reserved and is the highest
* in the range of allowed mark ids.
*/
@@ -1017,10 +1054,20 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
case RTE_FLOW_ACTION_TYPE_FLAG: {
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
}
+ case RTE_FLOW_ACTION_TYPE_DROP: {
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->flags |= FILTER_ACTION_DROP_FLAG;
+ break;
+ }
case RTE_FLOW_ACTION_TYPE_VOID:
continue;
default:
@@ -1028,6 +1075,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_V2;
return 0;
}
@@ -1059,10 +1108,14 @@ enic_get_filter_cap(struct enic *enic)
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
- static const struct enic_action_cap *ea;
-
- if (enic->filter_tags)
- ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+ const struct enic_action_cap *ea;
+ uint8_t actions;
+
+ actions = enic->filter_actions;
+ if (actions & FILTER_ACTION_DROP_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
+ else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
else
ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
return ea;
@@ -1268,6 +1321,12 @@ enic_flow_parse(struct rte_eth_dev *dev,
NULL,
"egress is not supported");
return -rte_errno;
+ } else if (attrs->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
} else if (!attrs->ingress) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
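
To illustrate the new DROP fate action, a hedged sketch (not part of the patch) of an rte_flow rule that the FILTER_ACTION_DROP_FLAG path can back; whether a given pattern is accepted still depends on the NIC's filter mode.

	/* Sketch only: drop all ingress UDP-over-IPv4 traffic on port 0. */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions, &err);
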
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index ec9d343f..fd940c58 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -69,12 +69,12 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
}
}
-static void enic_free_wq_buf(struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct rte_mbuf **buf)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
+ struct rte_mbuf *mbuf = *buf;
rte_pktmbuf_free_seg(mbuf);
- buf->mb = NULL;
+ *buf = NULL;
}
static void enic_log_q_error(struct enic *enic)
@@ -162,13 +162,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return 0;
}
-void enic_del_mac_address(struct enic *enic, int mac_index)
+int enic_del_mac_address(struct enic *enic, int mac_index)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
- if (vnic_dev_del_addr(enic->vdev, mac_addr))
- dev_err(enic, "del mac addr failed\n");
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
}
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
@@ -200,10 +199,15 @@ void enic_init_vnic_resources(struct enic *enic)
{
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
+ unsigned int rxq_interrupt_enable = 0;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int index = 0;
unsigned int cq_idx;
struct vnic_rq *data_rq;
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ rxq_interrupt_enable = 1;
+
for (index = 0; index < enic->rq_count; index++) {
cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
@@ -225,11 +229,13 @@ void enic_init_vnic_resources(struct enic *enic)
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
+ rxq_interrupt_enable,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
- 0 /* interrupt offset */,
+ rxq_interrupt_offset,
0 /* cq_message_addr */);
+ if (rxq_interrupt_enable)
+ rxq_interrupt_offset++;
}
for (index = 0; index < enic->wq_count; index++) {
@@ -237,6 +243,9 @@ void enic_init_vnic_resources(struct enic *enic)
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
@@ -252,10 +261,12 @@ void enic_init_vnic_resources(struct enic *enic)
(u64)enic->wq[index].cqmsg_rz->iova);
}
- vnic_intr_init(&enic->intr,
- enic->config.intr_timer_usec,
- enic->config.intr_timer_type,
- /*mask_on_assertion*/1);
+ for (index = 0; index < enic->intr_count; index++) {
+ vnic_intr_init(&enic->intr[index],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+ }
}
@@ -266,6 +277,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
struct rq_enet_desc *rqd = rq->ring.descs;
unsigned i;
dma_addr_t dma_addr;
+ uint32_t max_rx_pkt_len;
+ uint16_t rq_buf_len;
if (!rq->in_use)
return 0;
@@ -273,6 +286,18 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
rq->ring.desc_count);
+ /*
+ * If *not* using scatter and the mbuf size is greater than the
+ * requested max packet size (max_rx_pkt_len), then reduce the
+ * posted buffer size to max_rx_pkt_len. HW still receives packets
+ * larger than max_rx_pkt_len, but they will be truncated, which we
+ * drop in the rx handler. Not ideal, but better than returning
+ * large packets when the user is not expecting them.
+ */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
+ if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pkt_len;
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
@@ -287,9 +312,29 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
: RQ_ENET_TYPE_NOT_SOP),
- mb->buf_len - RTE_PKTMBUF_HEADROOM);
+ rq_buf_len);
rq->mbuf_ring[i] = mb;
}
+ /*
+ * Do not post the buffers to the NIC until we enable the RQ via
+ * enic_start_rq().
+ */
+ rq->need_initial_post = true;
+ /* Initialize fetch index while RQ is disabled */
+ iowrite32(0, &rq->ctrl->fetch_index);
+ return 0;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+ if (!rq->in_use || !rq->need_initial_post)
+ return;
/* make sure all prior writes are complete before doing the PIO write */
rte_rmb();
@@ -302,11 +347,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- iowrite32(0, &rq->ctrl->fetch_index);
rte_rmb();
-
- return 0;
-
+ rq->need_initial_post = false;
}
static void *
@@ -319,8 +361,8 @@ enic_alloc_consistent(void *priv, size_t size,
struct enic *enic = (struct enic *)priv;
struct enic_memzone_entry *mze;
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
@@ -379,16 +421,14 @@ enic_free_consistent(void *priv,
int enic_link_update(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
- int ret;
- int link_status = 0;
+ struct rte_eth_link link;
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ memset(&link, 0, sizeof(link));
+ link.link_status = enic_get_link_status(enic);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
static void
@@ -397,13 +437,98 @@ enic_intr_handler(void *arg)
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
- vnic_intr_return_all_credits(&enic->intr);
+ vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
enic_link_update(enic);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
+static int enic_rxq_intr_init(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+ uint32_t rxq_intr_count, i;
+ int err;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ /*
+ * Rx queue interrupts only work when we have MSI-X interrupts,
+ * one per queue. Sharing one interrupt is technically
+ * possible with VIC, but it is not worth the complications it brings.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+ " (vfio-pci driver)\n");
+ return -ENOTSUP;
+ }
+ rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+ err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+ if (err) {
+ dev_err(enic, "Failed to enable event fds for Rx queue"
+ " interrupts\n");
+ return err;
+ }
+ intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
+ rxq_intr_count * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ dev_err(enic, "Failed to allocate intr_vec\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < rxq_intr_count; i++)
+ intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
+ return 0;
+}
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
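The two helpers above wire the PMD into the generic ethdev Rx-interrupt machinery. A minimal, hypothetical application-side sketch (not part of this patch; port_id and queue_id are placeholders) of arming and waiting on one queue's interrupt looks roughly like this:

/*
 * Hypothetical sketch: wait for Rx traffic on one queue via the ethdev
 * Rx-interrupt API that this hunk enables. Requires the port to be
 * configured with dev_conf.intr_conf.rxq = 1 and bound to vfio-pci (MSI-X).
 */
#include <rte_ethdev.h>
#include <rte_interrupts.h>

static void wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;

	/* Register the queue's event fd with the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* Sleep until the NIC signals the queue, then go back to polling. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}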
+
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ unsigned int i;
+
+ /*
+ * Fill WQ descriptor fields that never change. Every descriptor is
+ * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+ * descriptors (i.e. request one completion update every 32 packets).
+ */
+ wq = &enic->wq[queue_idx];
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+ desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+ if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+ desc->header_length_flags |=
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+ }
+}
+
+static void pick_rx_handler(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev;
+
+ /* Use the non-scatter, simplified RX handler if possible. */
+ eth_dev = enic->rte_dev;
+ if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+ PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ }
+}
+
int enic_enable(struct enic *enic)
{
unsigned int index;
@@ -420,6 +545,9 @@ int enic_enable(struct enic *enic)
if (eth_dev->data->dev_conf.intr_conf.lsc)
vnic_dev_notify_set(enic->vdev, 0);
+ err = enic_rxq_intr_init(enic);
+ if (err)
+ return err;
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
"Flow director feature will not work\n");
@@ -443,6 +571,22 @@ int enic_enable(struct enic *enic)
}
}
+ /*
+ * Use the simple TX handler if possible. All offloads must be
+ * disabled.
+ */
+ if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ PMD_INIT_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ for (index = 0; index < enic->wq_count; index++)
+ enic_prep_wq_for_simple_tx(enic, index);
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+
+ pick_rx_handler(enic);
+
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
@@ -457,7 +601,8 @@ int enic_enable(struct enic *enic)
enic_intr_handler, (void *)enic->rte_dev);
rte_intr_enable(&(enic->pdev->intr_handle));
- vnic_intr_unmask(&enic->intr);
+ /* Unmask LSC interrupt */
+ vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
return 0;
}
@@ -465,17 +610,21 @@ int enic_enable(struct enic *enic)
int enic_alloc_intr_resources(struct enic *enic)
{
int err;
+ unsigned int i;
dev_info(enic, "vNIC resources used: "\
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic_vnic_rq_count(enic),
enic->cq_count, enic->intr_count);
- err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
- if (err)
- enic_free_vnic_resources(enic);
-
- return err;
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err) {
+ enic_free_vnic_resources(enic);
+ return err;
+ }
+ }
+ return 0;
}
void enic_free_rq(void *rxq)
@@ -490,6 +639,19 @@ void enic_free_rq(void *rxq)
enic = vnic_dev_priv(rq_sop->vdev);
rq_data = &enic->rq[rq_sop->data_queue_idx];
+ if (rq_sop->free_mbufs) {
+ struct rte_mbuf **mb;
+ int i;
+
+ mb = rq_sop->free_mbufs;
+ for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+ i < ENIC_RX_BURST_MAX; i++)
+ rte_pktmbuf_free(mb[i]);
+ rte_free(rq_sop->free_mbufs);
+ rq_sop->free_mbufs = NULL;
+ rq_sop->num_free_mbufs = 0;
+ }
+
enic_rxmbuf_queue_release(enic, rq_sop);
if (rq_data->in_use)
enic_rxmbuf_queue_release(enic, rq_data);
@@ -539,10 +701,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx)
rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
- if (rq_data->in_use)
+ if (rq_data->in_use) {
vnic_rq_enable(rq_data);
+ enic_initial_post_rx(enic, rq_data);
+ }
rte_mb();
vnic_rq_enable(rq_sop);
+ enic_initial_post_rx(enic, rq_sop);
eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -581,7 +746,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
- uint16_t mtu = enic->rte_dev->data->mtu;
+ uint32_t max_rx_pkt_len;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -599,22 +764,42 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
+ /* max_rx_pkt_len includes the ethernet header and CRC. */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
- /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
- (mbuf_size - 1)) / mbuf_size;
+ /* ceil((max pkt len)/mbuf_size) */
+ mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
mbufs_per_pkt = 1;
+ if (max_rx_pkt_len > mbuf_size) {
+ dev_warning(enic, "The maximum Rx packet size (%u) is"
+ " larger than the mbuf size (%u), and"
+ " scatter is disabled. Larger packets will"
+ " be truncated.\n",
+ max_rx_pkt_len, mbuf_size);
+ }
}
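Worked example for the computation above (illustrative numbers only): with a 9000-byte MTU, max_rx_pkt_len is 9018 (MTU plus Ethernet header and CRC), so a mempool with 2048-byte data rooms gives mbufs_per_pkt = (9018 + 2047) / 2048 = 5 buffers per received packet; the descriptor-count logic below then rounds the SOP and data ring sizes down to multiples of 32.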
if (mbufs_per_pkt > 1) {
dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
+ /*
+ * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * receives packet sizes up to the "max" MTU.
+ * If not using scatter, we can achieve the effect of dropping
+ * larger packets by reducing the size of posted buffers.
+ * See enic_alloc_rx_queue_mbufs().
+ */
+ if (max_rx_pkt_len <
+ enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
+ dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
+ " when scatter rx mode is in use.\n");
+ }
} else {
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
queue_idx);
@@ -623,20 +808,20 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
/* number of descriptors have to be a multiple of 32 */
- nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
- nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
if (mbufs_per_pkt > 1) {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = ((enic->config.rq_desc_count /
- (mbufs_per_pkt - 1)) & ~0x1F);
+ (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
min_data = min_sop * (mbufs_per_pkt - 1);
max_data = enic->config.rq_desc_count;
} else {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = enic->config.rq_desc_count;
min_data = 0;
max_data = 0;
@@ -654,8 +839,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
nb_data_desc = max_data;
}
if (mbufs_per_pkt > 1) {
- dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- mtu, mbuf_size, min_sop + min_data,
+ dev_info(enic, "For max packet size %u and mbuf size %u valid"
+ " rx descriptor range is %u to %u\n",
+ max_rx_pkt_len, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -706,10 +892,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->free_mbufs = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->free_mbufs",
+ sizeof(struct rte_mbuf *) *
+ ENIC_RX_BURST_MAX,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->free_mbufs == NULL)
+ goto err_free_data_mbuf;
+ rq_sop->num_free_mbufs = 0;
+
 rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
return 0;
+err_free_data_mbuf:
+ rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
rte_free(rq_sop->mbuf_ring);
err_free_cq:
@@ -749,25 +946,15 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
- }
+ /*
+ * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+ * print an info message for diagnostics.
+ */
+ dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
@@ -775,7 +962,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
@@ -788,9 +975,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
instance++);
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
- sizeof(uint32_t),
- SOCKET_ID_ANY, 0,
- ENIC_ALIGN);
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!wq->cqmsg_rz)
return -ENOMEM;
@@ -802,8 +988,11 @@ int enic_disable(struct enic *enic)
unsigned int i;
int err;
- vnic_intr_mask(&enic->intr);
- (void)vnic_intr_masked(&enic->intr); /* flush write */
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+ enic_rxq_intr_deinit(enic);
rte_intr_disable(&enic->pdev->intr_handle);
rte_intr_callback_unregister(&enic->pdev->intr_handle,
enic_intr_handler,
@@ -846,7 +1035,8 @@ int enic_disable(struct enic *enic)
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
- vnic_intr_clean(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
return 0;
}
@@ -879,9 +1069,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
static int enic_dev_open(struct enic *enic)
{
int err;
+ int flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic),
"vNIC device open failed, err %d\n", err);
@@ -889,44 +1080,42 @@ static int enic_dev_open(struct enic *enic)
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
- static union vnic_rss_key rss_key = {
- .key = {
- [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
- [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
- [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
- [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
- }
- };
- int err;
+ int err, i;
u8 name[NAME_MAX];
+ RTE_ASSERT(user_key != NULL);
snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
return -ENOMEM;
- rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
+ rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
err = enic_set_rss_key(enic,
rss_key_buf_pa,
sizeof(union vnic_rss_key));
+ /* Save for later queries */
+ if (!err) {
+ rte_memcpy(&enic->rss_key, rss_key_buf_va,
+ sizeof(union vnic_rss_key));
+ }
enic_free_consistent(enic, sizeof(union vnic_rss_key),
rss_key_buf_va, rss_key_buf_pa);
return err;
}
-static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
- int i;
int err;
u8 name[NAME_MAX];
@@ -936,9 +1125,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
if (!rss_cpu_buf_va)
return -ENOMEM;
- for (i = 0; i < (1 << rss_hash_bits); i++)
- (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
+ rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -947,6 +1134,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
rss_cpu_buf_va, rss_cpu_buf_pa);
+ /* Save for later queries */
+ if (!err)
+ rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
return err;
}
@@ -956,8 +1146,6 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
const u8 tso_ipid_split_en = 0;
int err;
- /* Enable VLAN tag stripping */
-
err = enic_set_nic_cfg(enic,
rss_default_cpu, rss_hash_type,
rss_hash_bits, rss_base_cpu,
@@ -967,47 +1155,50 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
return err;
}
-int enic_set_rss_nic_cfg(struct enic *enic)
+/* Initialize RSS with defaults, called from dev_configure */
+int enic_init_rss_nic_cfg(struct enic *enic)
{
- const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- const u8 rss_hash_bits = 7;
- const u8 rss_base_cpu = 0;
- u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
-
- if (rss_enable) {
- if (!enic_set_rsskey(enic)) {
- if (enic_set_rsscpu(enic, rss_hash_bits)) {
- rss_enable = 0;
- dev_warning(enic, "RSS disabled, "\
- "Failed to set RSS cpu indirection table.");
- }
- } else {
- rss_enable = 0;
- dev_warning(enic,
- "RSS disabled, Failed to set RSS key.\n");
+ static uint8_t default_rss_key[] = {
+ 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
+ 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
+ 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
+ 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
+ };
+ struct rte_eth_rss_conf rss_conf;
+ union vnic_rss_cpu rss_cpu;
+ int ret, i;
+
+ rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ /*
+ * If setting key for the first time, and the user gives us none, then
+ * push the default key to NIC.
+ */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = default_rss_key;
+ rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ ret = enic_set_rss_conf(enic, &rss_conf);
+ if (ret) {
+ dev_err(enic, "Failed to configure RSS\n");
+ return ret;
+ }
+ if (enic->rss_enable) {
+ /* If enabling RSS, use the default reta */
+ for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
}
+ ret = enic_set_rss_reta(enic, &rss_cpu);
+ if (ret)
+ dev_err(enic, "Failed to set RSS indirection table\n");
}
-
- return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
- rss_hash_bits, rss_base_cpu, rss_enable);
+ return ret;
}
int enic_setup_finish(struct enic *enic)
{
- int ret;
-
enic_init_soft_stats(enic);
- ret = enic_set_rss_nic_cfg(enic);
- if (ret) {
- dev_err(enic, "Failed to config nic, aborting.\n");
- return -1;
- }
-
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
@@ -1022,6 +1213,115 @@ int enic_setup_finish(struct enic *enic)
return 0;
}
+static int enic_rss_conf_valid(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ /* RSS is disabled per VIC settings. Ignore rss_conf. */
+ if (enic->flow_type_rss_offloads == 0)
+ return 0;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf != 0 &&
+ (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
+ dev_err(enic, "Given rss_hf contains none of the supported"
+ " types\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set hash type and key according to rss_conf */
+int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *eth_dev;
+ uint64_t rss_hf;
+ u8 rss_hash_type;
+ u8 rss_enable;
+ int ret;
+
+ RTE_ASSERT(rss_conf != NULL);
+ ret = enic_rss_conf_valid(enic, rss_conf);
+ if (ret) {
+ dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
+ return ret;
+ }
+
+ eth_dev = enic->rte_dev;
+ rss_hash_type = 0;
+ rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
+ if (enic->rq_count > 1 &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ rss_hf != 0) {
+ rss_enable = 1;
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+ if (enic->udp_rss_weak) {
+ /*
+ * 'TCP' is not a typo. The "weak" version of
+ * UDP RSS requires both the TCP and UDP bits
+ * be set. It does enable TCP RSS as well.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
+ }
+ if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+ if (enic->udp_rss_weak)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+ } else {
+ rss_enable = 0;
+ rss_hf = 0;
+ }
+
+ /* Set the hash key if provided */
+ if (rss_enable && rss_conf->rss_key) {
+ ret = enic_set_rsskey(enic, rss_conf->rss_key);
+ if (ret) {
+ dev_err(enic, "Failed to set RSS key\n");
+ return ret;
+ }
+ }
+
+ ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ rss_enable);
+ if (!ret) {
+ enic->rss_hf = rss_hf;
+ enic->rss_hash_type = rss_hash_type;
+ enic->rss_enable = rss_enable;
+ } else {
+ dev_err(enic, "Failed to update RSS configurations."
+ " hash=0x%x\n", rss_hash_type);
+ }
+ return ret;
+}
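For reference, a hypothetical application-side sketch (not from this patch) of an RSS configuration that passes enic_rss_conf_valid() and exercises enic_set_rss_conf(): the key must be exactly 40 bytes, and rss_hf should be a subset of the advertised flow_type_rss_offloads. Names such as my_rss_key, port_id, and the queue counts are placeholders.

#include <string.h>
#include <rte_ethdev.h>

static uint8_t my_rss_key[40];	/* exactly ENIC_RSS_HASH_KEY_SIZE bytes, filled by the app */

static int configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &info);
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_key = my_rss_key;
	conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(my_rss_key);
	/* Ask only for hash types the VIC reports as supported. */
	conf.rx_adv_conf.rss_conf.rss_hf =
		(ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP) &
		info.flow_type_rss_offloads;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}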
+
+int enic_set_vlan_strip(struct enic *enic)
+{
+ /*
+ * Unfortunately, VLAN strip on/off and RSS on/off are configured
+ * together. So, re-do niccfg, preserving the current RSS settings.
+ */
+ return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ enic->rss_enable);
+}
+
void enic_add_packet_filter(struct enic *enic)
{
/* Args -> directed, multicast, broadcast, promisc, allmulti */
@@ -1043,6 +1343,7 @@ static void enic_dev_deinit(struct enic *enic)
rte_free(eth_dev->data->mac_addrs);
rte_free(enic->cq);
+ rte_free(enic->intr);
rte_free(enic->rq);
rte_free(enic->wq);
}
@@ -1052,12 +1353,16 @@ int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
- unsigned int required_rq, required_wq, required_cq;
+ unsigned int required_rq, required_wq, required_cq, required_intr;
/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
required_rq = eth_dev->data->nb_rx_queues * 2;
required_wq = eth_dev->data->nb_tx_queues;
required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+ required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ required_intr += eth_dev->data->nb_rx_queues;
+ }
if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
@@ -1076,11 +1381,18 @@ int enic_set_vnic_res(struct enic *enic)
required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
+ if (enic->conf_intr_count < required_intr) {
+ dev_err(dev, "Not enough Interrupts to support Rx queue"
+ " interrupts. Required:%u, Configured:%u\n",
+ required_intr, enic->conf_intr_count);
+ rc = -EINVAL;
+ }
if (rc == 0) {
enic->rq_count = eth_dev->data->nb_rx_queues;
enic->wq_count = eth_dev->data->nb_tx_queues;
enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = required_intr;
}
return rc;
@@ -1176,20 +1488,26 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
- /* The easy case is when scatter is disabled. However if the MTU
- * becomes greater than the mbuf data size, packet drops will ensue.
+ /* Update the MTU and maximum packet length */
+ eth_dev->data->mtu = new_mtu;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ enic_mtu_to_max_rx_pktlen(new_mtu);
+
+ /*
+ * If the device has not started (enic_enable), nothing to do.
+ * Later, enic_enable() will set up RQs reflecting the new maximum
+ * packet length.
*/
- if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER)) {
- eth_dev->data->mtu = new_mtu;
+ if (!eth_dev->data->dev_started)
goto set_mtu_done;
- }
- /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
- * change Rx scatter mode if necessary for better performance. I.e. if
- * MTU was greater than the mbuf size and now it's less, scatter Rx
- * doesn't have to be used and vice versa.
- */
+ /*
+ * The device has started, re-do RQs on the fly. In the process, we
+ * pick up the new maximum packet length.
+ *
+ * Some applications rely on the ability to change MTU without stopping
+ * the device. So keep this behavior for now.
+ */
rte_spinlock_lock(&enic->mtu_lock);
/* Stop traffic on all RQs */
@@ -1214,12 +1532,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* now it is safe to reconfigure the RQs */
- /* update the mtu */
- eth_dev->data->mtu = new_mtu;
/* free and reallocate RQs with the new MTU */
for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (!rq->in_use)
+ continue;
enic_free_rq(rq);
rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
@@ -1240,7 +1558,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* put back the real receive function */
rte_mb();
- eth_dev->rx_pkt_burst = enic_recv_pkts;
+ pick_rx_handler(enic);
rte_mb();
/* restart Rx traffic */
@@ -1282,6 +1600,8 @@ static int enic_dev_init(struct enic *enic)
/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
enic->conf_cq_count, 8);
+ enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+ enic->conf_intr_count, 8);
enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
enic->conf_rq_count, 8);
enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
@@ -1290,6 +1610,10 @@ static int enic_dev_init(struct enic *enic)
dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
return -1;
}
+ if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+ dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+ return -1;
+ }
if (enic->conf_rq_count > 0 && enic->rq == NULL) {
dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
return -1;
@@ -1319,6 +1643,38 @@ static int enic_dev_init(struct enic *enic)
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+ dev_info(enic, "Overlay offload is enabled\n");
+ /*
+ * Reset the vxlan port to the default, as the NIC firmware
+ * does not reset it automatically and keeps the old setting.
+ */
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ ENIC_DEFAULT_VXLAN_PORT)) {
+ dev_err(enic, "failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
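When the overlay offload path above is active, a transmitted tunnel packet must describe both outer and inner headers so the TSO header length can include them (see the enic_xmit_pkts() hunk later in this patch). A hypothetical sketch of the mbuf fields an application would fill for VXLAN TSO; the header sizes shown are placeholders for a plain IPv4/TCP-in-VXLAN packet.

#include <rte_mbuf.h>

static void request_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IPV4 |
		       PKT_TX_OUTER_IP_CKSUM | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4 */
	m->l2_len = 8 + 8 + 14;		/* UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;			/* inner IPv4 */
	m->l4_len = 20;			/* inner TCP */
	m->tso_segsz = mss;
}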
@@ -1351,6 +1707,15 @@ int enic_probe(struct enic *enic)
enic_alloc_consistent,
enic_free_consistent);
+ /*
+ * Allocate the consistent memory for stats upfront so both primary and
+ * secondary processes can dump stats.
+ */
+ err = vnic_dev_alloc_stats_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_unregister;
+ }
/* Issue device open to get device in known state */
err = enic_dev_open(enic);
if (err) {
@@ -1359,8 +1724,10 @@ int enic_probe(struct enic *enic)
}
/* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PASS_THRU);
+ enic->ig_vlan_rewrite_mode);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c99d6183..8d493ffe 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -76,19 +76,26 @@ int enic_get_vnic_config(struct enic *enic)
? "" : "not "));
err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
- &enic->filter_tags);
+ &enic->filter_actions);
if (err) {
dev_err(enic_get_dev(enic),
"Error getting filter modes, %d\n", err);
return err;
}
+ vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
+ &enic->udp_rss_weak);
- dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+ dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
"NONE"))),
- ((enic->filter_tags) ? "" : "not "));
+ ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ?
+ "steer " : ""),
+ ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ?
+ "tag " : ""),
+ ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ?
+ "drop " : ""));
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -117,7 +124,10 @@ int enic_get_vnic_config(struct enic *enic)
"loopback tag 0x%04x\n",
ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
- ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ?
+ (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
+ ((enic->udp_rss_weak ? "+udp" :
+ "yes"))) : "no",
c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
@@ -128,6 +138,74 @@ int enic_get_vnic_config(struct enic *enic)
c->intr_timer_usec,
c->loop_tag);
+ /* RSS settings from vNIC */
+ enic->reta_size = ENIC_RSS_RETA_SIZE;
+ enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE;
+ enic->flow_type_rss_offloads = 0;
+ if (ENIC_SETTING(enic, RSSHASH_IPV4))
+ /*
+ * IPV4 hash type handles both non-frag and frag packet types.
+ * TCP/UDP is controlled via a separate flag below.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (ENIC_SETTING(enic, RSSHASH_IPV6))
+ /*
+ * The VIC adapter can perform RSS on IPv6 packets with and
+ * without extension headers. An IPv6 "fragment" is an IPv6
+ * packet with the fragment extension header.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (enic->udp_rss_weak)
+ enic->flow_type_rss_offloads |=
+ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+
+ /* Zero offloads if RSS is not enabled */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->flow_type_rss_offloads = 0;
+
+ enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+ vnic_dev_capable_vxlan(enic->vdev);
+ /*
+ * Default hardware capabilities. enic_dev_init() may add additional
+ * flags if it enables overlay offloads.
+ */
+ enic->tx_queue_offload_capa = 0;
+ enic->tx_offload_capa =
+ enic->tx_queue_offload_capa |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ enic->rx_offload_capa =
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ enic->tx_offload_mask =
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG;
+
return 0;
}
@@ -161,6 +239,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
+ enum vnic_devcmd_cmd cmd;
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
@@ -171,8 +250,8 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
a0 = nic_cfg;
a1 = 0;
-
- return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+ cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG;
+ return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
@@ -202,7 +281,8 @@ void enic_free_vnic_resources(struct enic *enic)
vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
- vnic_intr_free(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_free(&enic->intr[i]);
}
void enic_get_res_counts(struct enic *enic)
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index cf3a6fde..3786bc0e 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -16,6 +16,13 @@
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
+/* A descriptor ring has a multiple of 32 descriptors */
+#define ENIC_ALIGN_DESCS 32
+#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1)
+
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH 32
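For example (illustrative only), a request for 1000 descriptors rounds down to 1000 & ENIC_ALIGN_DESCS_MASK = 992, i.e. 31 groups of 32.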
+
#define ENIC_MIN_MTU 68
/* Does not include (possible) inserted VLAN tag and FCS */
@@ -30,6 +37,21 @@
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_TX_XMIT_MAX 64
+#define ENIC_RX_BURST_MAX 64
+
+/* Defaults for dev_info.default_{rx,tx}portconf */
+#define ENIC_DEFAULT_RX_BURST 32
+#define ENIC_DEFAULT_RX_RINGS 1
+#define ENIC_DEFAULT_RX_RING_SIZE 512
+#define ENIC_DEFAULT_TX_BURST 32
+#define ENIC_DEFAULT_TX_RINGS 1
+#define ENIC_DEFAULT_TX_RING_SIZE 512
+
+#define ENIC_RSS_DEFAULT_CPU 0
+#define ENIC_RSS_BASE_CPU 0
+#define ENIC_RSS_HASH_BITS 7
+#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS)
+#define ENIC_RSS_HASH_KEY_SIZE 40
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 2fe5a3fa..7129e121 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -15,15 +15,6 @@
#include <rte_ip.h>
#include <rte_tcp.h>
-#define ENIC_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
-
#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
@@ -130,30 +121,103 @@ enic_cq_rx_check_err(struct cq_desc *cqd)
/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+ * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
+ * afterwards.
+ *
+ * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+ * RTE_PTYPE_TUNNEL_GRENAT.
+ */
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
+ [0x01] = RTE_PTYPE_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER,
[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags];
+ return cq_type_table[cqrd_flags + tnl];
}
static inline void
@@ -200,10 +264,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
- if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ /*
+ * When overlay offload is enabled, the NIC may
+ * set ipv4_csum_ok=1 if the inner packet is IPv6.
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
@@ -238,13 +310,14 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct vnic_rq *rq;
struct enic *enic = vnic_dev_priv(sop_rq->vdev);
uint16_t cq_idx;
- uint16_t rq_idx;
+ uint16_t rq_idx, max_rx;
uint16_t rq_num;
struct rte_mbuf *nmb, *rxmb;
uint16_t nb_rx = 0;
struct vnic_cq *cq;
volatile struct cq_desc *cqd_ptr;
uint8_t color;
+ uint8_t tnl;
uint16_t seg_length;
struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
@@ -252,19 +325,23 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
data_rq = &enic->rq[sop_rq->data_queue_idx];
- while (nb_rx < nb_pkts) {
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
+
+ while (max_rx) {
volatile struct rq_enet_desc *rqd_ptr;
struct cq_desc cqd;
uint8_t packet_error;
uint16_t ciflags;
+ max_rx--;
+
/* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
- & CQ_DESC_COLOR_MASK;
- if (color == cq->last_color)
+ if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
break;
/* Get the cq descriptor and extract rq info from it */
@@ -288,13 +365,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Get the mbuf to return and replace with one just allocated */
rxmb = rq->mbuf_ring[rq_idx];
rq->mbuf_ring[rq_idx] = nmb;
-
- /* Increment cqd, rqd, mbuf_table index */
cq_idx++;
- if (unlikely(cq_idx == cq->ring.desc_count)) {
- cq_idx = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
/* Prefetch next mbuf & desc while processing current one */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
@@ -336,10 +407,22 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
continue;
}
+ /*
+ * When overlay offload is enabled, CQ.fcoe indicates the
+ * packet is tunnelled.
+ */
+ tnl = enic->overlay_offload &&
+ (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
/* cq rx flags are only valid if eop bit is set */
- first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+ first_seg->packet_type =
+ enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
if (unlikely(packet_error)) {
rte_pktmbuf_free(first_seg);
rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
@@ -354,6 +437,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* store the mbuf address into the next entry of the array */
rx_pkts[nb_rx++] = first_seg;
}
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
sop_rq->pkt_first_seg = first_seg;
sop_rq->pkt_last_seg = last_seg;
@@ -387,9 +474,123 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
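A standalone sketch (not part of the patch) of the completion-queue color test used above: CQ_DESC_COLOR_MASK_NOSHIFT (0x80) lets the loop compare the color bit in place instead of shifting it out for every descriptor, and the expected "stale" color is toggled once per ring wrap rather than per entry.

#include <stdint.h>

#define COLOR_BIT 0x80	/* same bit as CQ_DESC_COLOR_MASK_NOSHIFT */

/* A slot is ready once the NIC has written it with the opposite color. */
static int cq_slot_ready(uint8_t type_color, uint8_t expected_stale_color)
{
	return (type_color & COLOR_BIT) != expected_stale_color;
}

/*
 * After consuming the last slot, software wraps the index to 0 and flips the
 * stale color for the next pass, as the driver does with
 * cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT.
 */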
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mb, **rx, **rxmb;
+ uint16_t cq_idx, nb_rx, max_rx;
+ struct cq_enet_rq_desc *cqd;
+ struct rq_enet_desc *rqd;
+ unsigned int port_id;
+ struct vnic_cq *cq;
+ struct vnic_rq *rq;
+ struct enic *enic;
+ uint8_t color;
+ bool overlay;
+ bool tnl;
+
+ rq = rx_queue;
+ enic = vnic_dev_priv(rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ cq_idx = cq->to_clean;
+
+ /*
+ * Fill up the reserve of free mbufs. Below, we restock the receive
+ * ring with these mbufs to avoid allocation failures.
+ */
+ if (rq->num_free_mbufs == 0) {
+ if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+ ENIC_RX_BURST_MAX))
+ return 0;
+ rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+ }
+
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+ max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+ cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
+ rxmb = rq->mbuf_ring + cq_idx;
+ port_id = enic->port_id;
+ overlay = enic->overlay_offload;
+
+ rx = rx_pkts;
+ while (max_rx) {
+ max_rx--;
+ if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ break;
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ cqd++;
+ continue;
+ }
+
+ mb = *rxmb++;
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+ mb->data_len = cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ mb->pkt_len = mb->data_len;
+ mb->port = port_id;
+ tnl = overlay && (cqd->completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ mb->packet_type =
+ enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+ tnl);
+ enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ cqd++;
+ *rx++ = mb;
+ }
+ /* Number of descriptors visited */
+ nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+ if (nb_rx == 0)
+ return 0;
+ rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+ rxmb = rq->mbuf_ring + cq_idx;
+ cq_idx += nb_rx;
+ rq->rx_nb_hold += nb_rx;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+ cq->to_clean = cq_idx;
+
+ memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+ sizeof(struct rte_mbuf *) * nb_rx);
+ rq->num_free_mbufs -= nb_rx;
+ while (nb_rx) {
+ nb_rx--;
+ mb = *rxmb++;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ rqd++;
+ }
+ if (rq->rx_nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index,
+ rq->rx_nb_hold);
+ rq->rx_nb_hold = 0;
+ rte_wmb();
+ iowrite32_relaxed(rq->posted_index,
+ &rq->ctrl->posted_index);
+ }
+
+ return rx - rx_pkts;
+}
+
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf *buf;
struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
unsigned int nb_to_free, nb_free = 0, i;
struct rte_mempool *pool;
@@ -399,13 +600,10 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ 1;
tail_idx = wq->tail_idx;
- buf = &wq->bufs[tail_idx];
- pool = ((struct rte_mbuf *)buf->mb)->pool;
+ pool = wq->bufs[tail_idx]->pool;
for (i = 0; i < nb_to_free; i++) {
- buf = &wq->bufs[tail_idx];
- m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
- buf->mb = NULL;
-
+ buf = wq->bufs[tail_idx];
+ m = rte_pktmbuf_prefree_seg(buf);
if (unlikely(m == NULL)) {
tail_idx = enic_ring_incr(desc_count, tail_idx);
continue;
@@ -443,9 +641,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
return 0;
}
-uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
int32_t ret;
uint16_t i;
uint64_t ol_flags;
@@ -453,9 +652,13 @@ uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
+ if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_errno = EINVAL;
+ return i;
+ }
ol_flags = m->ol_flags;
- if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ if (ol_flags & wq->tx_offload_notsup_mask) {
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
@@ -489,12 +692,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t ol_flags_mask;
unsigned int wq_desc_avail;
int head_idx;
- struct vnic_wq_buf *buf;
unsigned int desc_count;
struct wq_enet_desc *descs, *desc_p, desc_tmp;
uint16_t mss;
uint8_t vlan_tag_insert;
- uint8_t eop;
+ uint8_t eop, cq;
uint64_t bus_addr;
uint8_t offload_mode;
uint16_t header_len;
@@ -558,6 +760,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
+ /* For tunnel, need the size of outer+inner headers */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ header_len += tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ }
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
@@ -572,15 +779,18 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
break;
}
}
-
-
+ wq->cq_pend++;
+ cq = 0;
+ if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
- offload_mode, eop, eop, 0, vlan_tag_insert,
+ offload_mode, eop, cq, 0, vlan_tag_insert,
vlan_id, 0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
@@ -589,20 +799,26 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_pkt->next) {
data_len = tx_pkt->data_len;
- if (tx_pkt->next == NULL)
+ wq->cq_pend++;
+ cq = 0;
+ if (tx_pkt->next == NULL) {
eop = 1;
+ if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
+ }
desc_p = descs + head_idx;
bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, offload_mode, eop, eop,
+ mss, 0, offload_mode, eop, cq,
0, vlan_tag_insert, vlan_id,
0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
}
@@ -618,4 +834,81 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return index;
}
+static void enqueue_simple_pkts(struct rte_mbuf **pkts,
+ struct wq_enet_desc *desc,
+ uint16_t n,
+ struct enic *enic)
+{
+ struct rte_mbuf *p;
+
+ while (n) {
+ n--;
+ p = *pkts++;
+ desc->address = p->buf_iova + p->data_off;
+ desc->length = p->pkt_len;
+ /*
+ * The app should not send oversized
+ * packets. tx_pkt_prepare includes a check as
+ * well. But some apps ignore the device max size and
+ * tx_pkt_prepare. Oversized packets cause WQ errors
+ * and the NIC ends up disabling the whole WQ. So
+ * truncate packets.
+ */
+ if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ desc->length = ENIC_TX_MAX_PKT_SIZE;
+ rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ }
+ desc++;
+ }
+}
+
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int head_idx, desc_count;
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ struct enic *enic;
+ uint16_t rem, n;
+
+ wq = (struct vnic_wq *)tx_queue;
+ enic = vnic_dev_priv(wq->vdev);
+ enic_cleanup_wq(enic, wq);
+ /* Will enqueue this many packets in this call */
+ nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
+ if (nb_pkts == 0)
+ return 0;
+
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+
+ /* Descriptors until the end of the ring */
+ n = desc_count - head_idx;
+ n = RTE_MIN(nb_pkts, n);
+ /* Save mbuf pointers to free later */
+ memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);
+
+ /* Enqueue until the ring end */
+ rem = nb_pkts - n;
+ desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
+ enqueue_simple_pkts(tx_pkts, desc, n, enic);
+
+ /* Wrap to the start of the ring */
+ if (rem) {
+ tx_pkts += n;
+ memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ enqueue_simple_pkts(tx_pkts, desc, rem, enic);
+ }
+ rte_wmb();
+
+ /* Update head_idx and desc_avail */
+ wq->ring.desc_avail -= nb_pkts;
+ head_idx += nb_pkts;
+ if (head_idx >= desc_count)
+ head_idx -= desc_count;
+ wq->head_idx = head_idx;
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
+ return nb_pkts;
+}
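A hypothetical configuration (not from this patch) under which the driver would select both fast paths added here, enic_simple_xmit_pkts() and enic_noscatter_recv_pkts(): no Tx offloads and no Rx scatter, so every received frame fits in a single mbuf. port_id and the queue counts are placeholders.

#include <string.h>
#include <rte_ethdev.h>

static int configure_fast_paths(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.txmode.offloads = 0;	/* simple Tx handler requires zero Tx offloads */
	conf.rxmode.offloads = 0;	/* in particular, no DEV_RX_OFFLOAD_SCATTER */
	/* Keep the default MTU so a received frame fits in one 2KB mbuf. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}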
diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build
new file mode 100644
index 00000000..bfd4e237
--- /dev/null
+++ b/drivers/net/enic/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cisco Systems, Inc.
+
+sources = files(
+ 'base/vnic_cq.c',
+ 'base/vnic_dev.c',
+ 'base/vnic_intr.c',
+ 'base/vnic_rq.c',
+ 'base/vnic_rss.c',
+ 'base/vnic_wq.c',
+ 'enic_clsf.c',
+ 'enic_ethdev.c',
+ 'enic_flow.c',
+ 'enic_main.c',
+ 'enic_res.c',
+ 'enic_rxtx.c',
+ )
+deps += ['hash']
+includes += include_directories('base')