author    Ido Barnea <ibarnea@cisco.com>  2017-02-05 15:21:19 +0200
committer Ido Barnea <ibarnea@cisco.com>  2017-02-13 12:32:01 +0200
commit    9ca4a157305e4e23a892ba9bafc9eee0f66954ce (patch)
tree      1a8afcf815fd33e7623e3c16246abe86c01bc8fd /src/dpdk/drivers/net/enic
parent    2dab8f65015e9fa90df395be6ee1a07e9ac71044 (diff)
dpdk1702-rc2 upstream files unchanged + mlx5 driver rc3
Signed-off-by: Ido Barnea <ibarnea@cisco.com>
Diffstat (limited to 'src/dpdk/drivers/net/enic')
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_dev.c    37
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_rq.c      6
-rw-r--r--  src/dpdk/drivers/net/enic/base/vnic_rq.h      2
-rw-r--r--  src/dpdk/drivers/net/enic/enic.h             41
-rw-r--r--  src/dpdk/drivers/net/enic/enic_clsf.c        66
-rw-r--r--  src/dpdk/drivers/net/enic/enic_compat.h      27
-rw-r--r--  src/dpdk/drivers/net/enic/enic_ethdev.c     103
-rw-r--r--  src/dpdk/drivers/net/enic/enic_main.c       279
-rw-r--r--  src/dpdk/drivers/net/enic/enic_res.c          5
-rw-r--r--  src/dpdk/drivers/net/enic/enic_res.h          6
-rw-r--r--  src/dpdk/drivers/net/enic/enic_rxtx.c       198
11 files changed, 504 insertions, 266 deletions
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.c b/src/dpdk/drivers/net/enic/base/vnic_dev.c
index dddb1dcd..84e4840a 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.c
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.c
@@ -266,32 +266,35 @@ void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
memset(ring->descs, 0, ring->size);
}
-int vnic_dev_alloc_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring,
- unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
+ unsigned int desc_count, unsigned int desc_size,
+ __attribute__((unused)) unsigned int socket_id,
char *z_name)
{
- const struct rte_memzone *rz;
+ void *alloc_addr = NULL;
+ dma_addr_t alloc_pa = 0;
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
-
- rz = rte_memzone_reserve_aligned(z_name,
- ring->size_unaligned, socket_id,
- 0, ENIC_ALIGN);
- if (!rz) {
+ alloc_addr = vdev->alloc_consistent(vdev->priv,
+ ring->size_unaligned,
+ &alloc_pa, (u8 *)z_name);
+ if (!alloc_addr) {
pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
-
- ring->descs_unaligned = rz->addr;
- if (!ring->descs_unaligned) {
+ ring->descs_unaligned = alloc_addr;
+ if (!alloc_pa) {
pr_err("Failed to map allocated ring (size=%d), aborting\n",
(int)ring->size);
+ vdev->free_consistent(vdev->priv,
+ ring->size_unaligned,
+ alloc_addr,
+ alloc_pa);
return -ENOMEM;
}
-
- ring->base_addr_unaligned = (dma_addr_t)rz->phys_addr;
+ ring->base_addr_unaligned = alloc_pa;
ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
ring->base_align);
@@ -308,8 +311,13 @@ int vnic_dev_alloc_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
struct vnic_dev_ring *ring)
{
- if (ring->descs)
+ if (ring->descs) {
+ vdev->free_consistent(vdev->priv,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
ring->descs = NULL;
+ }
}
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
@@ -668,7 +676,6 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
-
if (err)
pr_err("Can't set packet filter\n");
diff --git a/src/dpdk/drivers/net/enic/base/vnic_rq.c b/src/dpdk/drivers/net/enic/base/vnic_rq.c
index 0e700a12..10a40c1b 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_rq.c
+++ b/src/dpdk/drivers/net/enic/base/vnic_rq.c
@@ -87,9 +87,11 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
iowrite32(0, &rq->ctrl->error_status);
iowrite32(fetch_index, &rq->ctrl->fetch_index);
iowrite32(posted_index, &rq->ctrl->posted_index);
- if (rq->is_sop)
- iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+ if (rq->data_queue_enable)
+ iowrite32(((1 << 10) | rq->data_queue_idx),
&rq->ctrl->data_ring);
+ else
+ iowrite32(0, &rq->ctrl->data_ring);
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
diff --git a/src/dpdk/drivers/net/enic/base/vnic_rq.h b/src/dpdk/drivers/net/enic/base/vnic_rq.h
index fd9e1704..f3fd39f7 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_rq.h
+++ b/src/dpdk/drivers/net/enic/base/vnic_rq.h
@@ -91,11 +91,13 @@ struct vnic_rq {
uint16_t rxst_idx;
uint32_t tot_pkts;
uint16_t data_queue_idx;
+ uint8_t data_queue_enable;
uint8_t is_sop;
uint8_t in_use;
struct rte_mbuf *pkt_first_seg;
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
+ uint16_t tot_nb_desc;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/src/dpdk/drivers/net/enic/enic.h b/src/dpdk/drivers/net/enic/enic.h
index 9117cc76..a4540178 100644
--- a/src/dpdk/drivers/net/enic/enic.h
+++ b/src/dpdk/drivers/net/enic/enic.h
@@ -60,6 +60,7 @@
#define ENIC_RQ_MAX 16
#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_MAX_MAC_ADDR 64
#define VLAN_ETH_HLEN 18
@@ -97,13 +98,11 @@ struct enic_fdir {
void (*copy_fltr_fn)(struct filter_v2 *filt,
struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks);
-
};
struct enic_soft_stats {
rte_atomic64_t rx_nombuf;
rte_atomic64_t rx_packet_errors;
- rte_atomic64_t tx_oversized;
};
struct enic_memzone_entry {
@@ -168,17 +167,32 @@ struct enic {
/* linked list storing memory allocations */
LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
rte_spinlock_t memzone_list_lock;
+ rte_spinlock_t mtu_lock;
};
-static inline unsigned int enic_sop_rq(unsigned int rq)
+/* Get the CQ index from a Start of Packet(SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
- return rq * 2;
+ return sop_idx / 2;
}
-static inline unsigned int enic_data_rq(unsigned int rq)
+/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
- return rq * 2 + 1;
+ return sop_idx / 2;
+}
+
+/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2;
+}
+
+/* Get the Data RQ index from a RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2 + 1;
}
static inline unsigned int enic_vnic_rq_count(struct enic *enic)
@@ -252,7 +266,7 @@ extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
extern void enic_free_rq(void *rxq);
extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc);
+ uint16_t nb_desc, uint16_t free_thresh);
extern int enic_set_rss_nic_cfg(struct enic *enic);
extern int enic_set_vnic_res(struct enic *enic);
extern void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size);
@@ -264,8 +278,8 @@ extern void enic_dev_stats_get(struct enic *enic,
struct rte_eth_stats *r_stats);
extern void enic_dev_stats_clear(struct enic *enic);
extern void enic_add_packet_filter(struct enic *enic);
-extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-extern void enic_del_mac_address(struct enic *enic);
+void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+void enic_del_mac_address(struct enic *enic, int mac_index);
extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
@@ -278,14 +292,17 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+int enic_link_update(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks);
-void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
-__rte_unused struct rte_eth_fdir_input *input,
-__rte_unused struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
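Note: the renamed helpers above encode a fixed 2:1 queue layout: RTE Rx queue i owns start-of-packet (SOP) RQ 2i and data RQ 2i+1, and both complete into CQ i. A small self-contained check of exactly that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int rte_idx;

	for (rte_idx = 0; rte_idx < 8; rte_idx++) {
		unsigned int sop  = rte_idx * 2;      /* enic_rte_rq_idx_to_sop_idx */
		unsigned int data = rte_idx * 2 + 1;  /* enic_rte_rq_idx_to_data_idx */

		/* enic_sop_rq_idx_to_rte_idx and enic_sop_rq_idx_to_cq_idx
		 * both divide by two, so CQ index == RTE queue index. */
		assert(sop / 2 == rte_idx);
		assert(data == sop + 1);
	}
	return 0;
}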
diff --git a/src/dpdk/drivers/net/enic/enic_clsf.c b/src/dpdk/drivers/net/enic/enic_clsf.c
index 1610951d..bcf479ac 100644
--- a/src/dpdk/drivers/net/enic/enic_clsf.c
+++ b/src/dpdk/drivers/net/enic/enic_clsf.c
@@ -74,7 +74,7 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
- info->mode = enic->fdir.modes;
+ info->mode = (enum rte_fdir_mode)enic->fdir.modes;
info->flow_types_mask[0] = enic->fdir.types_mask;
}
@@ -107,7 +107,6 @@ enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
memcpy(gp->layer[layer].val, val, len);
}
-
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
* without advanced filter support.
*/
@@ -133,28 +132,6 @@ copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}
-#define TREX_PATCH
-#ifdef TREX_PATCH
-void
-copy_fltr_recv_all(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks) {
- struct filter_generic_1 *gp = &fltr->u.generic_1;
- memset(gp, 0, sizeof(*gp));
-
- struct ether_hdr eth_mask, eth_val;
- memset(&eth_mask, 0, sizeof(eth_mask));
- memset(&eth_val, 0, sizeof(eth_val));
-
- eth_val.ether_type = 0xdead;
- eth_mask.ether_type = 0;
-
- gp->position = 0;
- enic_set_layer(gp, 0, FILTER_GENERIC_1_L2,
- &eth_mask, &eth_val, sizeof(struct ether_hdr));
-
-}
-#endif
-
/* Copy Flow Director filter to a VIC generic filter (requires advanced
* filter support.
*/
@@ -165,15 +142,8 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct filter_generic_1 *gp = &fltr->u.generic_1;
int i;
- RTE_ASSERT(enic->adv_filters);
-
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
-#ifdef TREX_PATCH
- // important for this to be below 2.
- // If added with position 2, IPv6 UDP and ICMP seems to be caught by some other rule
- gp->position = 1;
-#endif
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
struct udp_hdr udp_mask, udp_val;
@@ -185,7 +155,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
udp_val.src_port = input->flow.udp4_flow.src_port;
}
if (input->flow.udp4_flow.dst_port) {
- udp_mask.src_port = masks->dst_port_mask;
+ udp_mask.dst_port = masks->dst_port_mask;
udp_val.dst_port = input->flow.udp4_flow.dst_port;
}
@@ -241,13 +211,9 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
if (input->flow.ip4_flow.tos) {
- ip4_mask.type_of_service = masks->ipv4_mask.tos;
+ ip4_mask.type_of_service = 0xff;
ip4_val.type_of_service = input->flow.ip4_flow.tos;
}
- if (input->flow.ip4_flow.ip_id) {
- ip4_mask.packet_id = 0xffff;
- ip4_val.packet_id = input->flow.ip4_flow.ip_id;
- }
if (input->flow.ip4_flow.ttl) {
ip4_mask.time_to_live = 0xff;
ip4_val.time_to_live = input->flow.ip4_flow.ttl;
@@ -333,7 +299,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
if (input->flow.ipv6_flow.proto) {
- ipv6_mask.proto = masks->ipv6_mask.proto;
+ ipv6_mask.proto = 0xff;
ipv6_val.proto = input->flow.ipv6_flow.proto;
}
for (i = 0; i < 4; i++) {
@@ -349,8 +315,8 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
input->flow.ipv6_flow.dst_ip[i];
}
if (input->flow.ipv6_flow.tc) {
- ipv6_mask.vtc_flow = ((uint32_t)masks->ipv6_mask.tc<<12);
- ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
+ ipv6_mask.vtc_flow = 0x00ff0000;
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
}
if (input->flow.ipv6_flow.hop_limits) {
ipv6_mask.hop_limits = 0xff;
@@ -372,11 +338,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
case -EINVAL:
case -ENOENT:
enic->fdir.stats.f_remove++;
-#ifdef TREX_PATCH
- return pos;
-#else
return -EINVAL;
-#endif
default:
/* The entry is present in the table */
key = enic->fdir.nodes[pos];
@@ -420,7 +382,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
/* Get the enicpmd RQ from the DPDK Rx queue */
- queue = enic_sop_rq(params->action.rx_queue);
+ queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);
if (!enic->rq[queue].in_use)
return -EINVAL;
@@ -487,18 +449,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
-#ifdef TREX_PATCH
- switch (params->soft_id) {
- case 100:
- copy_fltr_recv_all(&fltr, &params->input, &enic->rte_dev->data->dev_conf.fdir_conf.mask);
- break;
- default:
-#endif
- enic->fdir.copy_fltr_fn(&fltr, &params->input,
- &enic->rte_dev->data->dev_conf.fdir_conf.mask);
-#ifdef TREX_PATCH
- }
-#endif
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
diff --git a/src/dpdk/drivers/net/enic/enic_compat.h b/src/dpdk/drivers/net/enic/enic_compat.h
index 5dbd983b..fc58bb41 100644
--- a/src/dpdk/drivers/net/enic/enic_compat.h
+++ b/src/dpdk/drivers/net/enic/enic_compat.h
@@ -41,6 +41,7 @@
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_log.h>
+#include <rte_io.h>
#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
@@ -95,42 +96,52 @@ typedef unsigned long long dma_addr_t;
static inline uint32_t ioread32(volatile void *addr)
{
- return *(volatile uint32_t *)addr;
+ return rte_read32(addr);
}
static inline uint16_t ioread16(volatile void *addr)
{
- return *(volatile uint16_t *)addr;
+ return rte_read16(addr);
}
static inline uint8_t ioread8(volatile void *addr)
{
- return *(volatile uint8_t *)addr;
+ return rte_read8(addr);
}
static inline void iowrite32(uint32_t val, volatile void *addr)
{
- *(volatile uint32_t *)addr = val;
+ rte_write32(val, addr);
+}
+
+static inline void iowrite32_relaxed(uint32_t val, volatile void *addr)
+{
+ rte_write32_relaxed(val, addr);
}
static inline void iowrite16(uint16_t val, volatile void *addr)
{
- *(volatile uint16_t *)addr = val;
+ rte_write16(val, addr);
}
static inline void iowrite8(uint8_t val, volatile void *addr)
{
- *(volatile uint8_t *)addr = val;
+ rte_write8(val, addr);
}
static inline unsigned int readl(volatile void __iomem *addr)
{
- return *(volatile unsigned int *)addr;
+ return rte_read32(addr);
+}
+
+static inline unsigned int readl_relaxed(volatile void __iomem *addr)
+{
+ return rte_read32_relaxed(addr);
}
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
- *(volatile unsigned int *)addr = val;
+ rte_write32(val, addr);
}
#define min_t(type, x, y) ({ \
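Note: the wrappers above now route through rte_io.h. rte_write32() includes the barrier required before ringing a device doorbell, while rte_write32_relaxed() omits it for callers that have already ordered their descriptor stores; the Tx/Rx post paths later in this patch use the relaxed form after an explicit rte_wmb()/rte_compiler_barrier(). A sketch of that pattern, assuming db points at a mapped doorbell register:

#include <rte_atomic.h>
#include <rte_io.h>

static inline void post_doorbell(volatile void *db, uint32_t posted_index)
{
	rte_wmb();                              /* order descriptor writes first */
	rte_write32_relaxed(posted_index, db);  /* barrier already issued above */
}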
diff --git a/src/dpdk/drivers/net/enic/enic_ethdev.c b/src/dpdk/drivers/net/enic/enic_ethdev.c
index 6a86e23f..bffa8700 100644
--- a/src/dpdk/drivers/net/enic/enic_ethdev.c
+++ b/src/dpdk/drivers/net/enic/enic_ethdev.c
@@ -154,7 +154,7 @@ static int enicpmd_dev_setup_intr(struct enic *enic)
return 0;
/* check start of packet (SOP) RQs only in case scatter is disabled. */
for (index = 0; index < enic->rq_count; index++) {
- if (!enic->rq[enic_sop_rq(index)].ctrl)
+ if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
break;
}
if (enic->rq_count != index)
@@ -262,6 +262,35 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
enic_free_rq(rxq);
}
+static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint32_t queue_count = 0;
+ struct vnic_cq *cq;
+ uint32_t cq_tail;
+ uint16_t cq_idx;
+ int rq_num;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+ cq_idx = cq->to_clean;
+
+ cq_tail = ioread32(&cq->ctrl->cq_tail);
+
+ if (cq_tail < cq_idx)
+ cq_tail += cq->ring.desc_count;
+
+ queue_count = cq_tail - cq_idx;
+
+ return queue_count;
+}
+
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
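Note: enicpmd_dev_rx_queue_count() above computes the modular distance from the software clean index (cq->to_clean) to the hardware tail. A worked example of the wraparound branch, assuming a 16-descriptor completion ring:

/* to_clean = 14, hardware cq_tail = 3, desc_count = 16 */
uint32_t cq_idx = 14, cq_tail = 3, desc_count = 16;

if (cq_tail < cq_idx)
	cq_tail += desc_count;   /* 3 + 16 = 19 */
/* queue_count = cq_tail - cq_idx = 19 - 14 = 5 completions pending */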
@@ -284,18 +313,15 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
}
eth_dev->data->rx_queues[queue_idx] =
- (void *)&enic->rq[enic_sop_rq(queue_idx)];
+ (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
- ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
+ ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
+ rx_conf->rx_free_thresh);
if (ret) {
dev_err(enic, "error in allocating rq\n");
return ret;
}
- enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
- dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
- enic->rq[queue_idx].rx_free_thresh);
-
return enicpmd_dev_setup_intr(enic);
}
@@ -405,17 +431,9 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete)
{
struct enic *enic = pmd_priv(eth_dev);
- int ret;
- int link_status = 0;
ENICPMD_FUNC_TRACE();
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ return enic_link_update(enic);
}
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
@@ -435,22 +453,19 @@ static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
enic_dev_stats_clear(enic);
}
-
-
-
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
- device_info->max_rx_pktlen = enic->rte_dev->data->mtu
- + ETHER_HDR_LEN + 4;
- device_info->max_mac_addrs = 1;
+ device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
+ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
device_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -460,17 +475,18 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
-
- device_info->speed_capa = ETH_LINK_SPEED_40G;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_L4_TCP,
@@ -531,12 +547,12 @@ static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
enic_set_mac_address(enic, mac_addr->addr_bytes);
}
-static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic);
+ enic_del_mac_address(enic, index);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -575,7 +591,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.tx_queue_stop = enicpmd_dev_tx_queue_stop,
.rx_queue_setup = enicpmd_dev_rx_queue_setup,
.rx_queue_release = enicpmd_dev_rx_queue_release,
- .rx_queue_count = NULL,
+ .rx_queue_count = enicpmd_dev_rx_queue_count,
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
@@ -607,7 +623,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = eth_dev->pci_dev;
+ pdev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
@@ -620,32 +636,15 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_enic_pmd = {
.pci_drv = {
- .name = "rte_enic_pmd",
.id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_enicpmd_dev_init,
.dev_private_size = sizeof(struct enic),
};
-/* Driver initialization routine.
- * Invoked once at EAL init time.
- * Register as the [Poll Mode] Driver of Cisco ENIC device.
- */
-static int
-rte_enic_pmd_init(__rte_unused const char *name,
- __rte_unused const char *params)
-{
- ENICPMD_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_enic_pmd);
- return 0;
-}
-
-static struct rte_driver rte_enic_driver = {
- .type = PMD_PDEV,
- .init = rte_enic_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_enic_driver, enic);
-DRIVER_REGISTER_PCI_TABLE(enic, pci_id_enic_map);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio");
diff --git a/src/dpdk/drivers/net/enic/enic_main.c b/src/dpdk/drivers/net/enic/enic_main.c
index 4530dcf4..21e8edeb 100644
--- a/src/dpdk/drivers/net/enic/enic_main.c
+++ b/src/dpdk/drivers/net/enic/enic_main.c
@@ -137,7 +137,6 @@ static void enic_clear_soft_stats(struct enic *enic)
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_clear(&soft_stats->rx_nombuf);
rte_atomic64_clear(&soft_stats->rx_packet_errors);
- rte_atomic64_clear(&soft_stats->tx_oversized);
}
static void enic_init_soft_stats(struct enic *enic)
@@ -145,7 +144,6 @@ static void enic_init_soft_stats(struct enic *enic)
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_init(&soft_stats->rx_nombuf);
rte_atomic64_init(&soft_stats->rx_packet_errors);
- rte_atomic64_init(&soft_stats->tx_oversized);
enic_clear_soft_stats(enic);
}
@@ -168,7 +166,6 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return;
}
-
/* The number of truncated packets can only be calculated by
* subtracting a hardware counter from error packets received by
* the driver. Note: this causes transient inaccuracies in the
@@ -177,26 +174,28 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
* which can make ibytes be slightly higher than it should be.
*/
rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
- rx_truncated = rx_packet_errors - stats->rx.rx_errors -
- stats->rx.rx_no_bufs;
+ rx_truncated = rx_packet_errors - stats->rx.rx_errors;
r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
r_stats->opackets = stats->tx.tx_frames_ok;
- r_stats->ibytes = stats->rx.rx_unicast_bytes_ok+stats->rx.rx_multicast_bytes_ok+stats->rx.rx_broadcast_bytes_ok;
+ r_stats->ibytes = stats->rx.rx_bytes_ok;
r_stats->obytes = stats->tx.tx_bytes_ok;
r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors + rte_atomic64_read(&soft_stats->tx_oversized);
+ r_stats->oerrors = stats->tx.tx_errors;
r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}
-void enic_del_mac_address(struct enic *enic)
+void enic_del_mac_address(struct enic *enic, int mac_index)
{
- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+ if (vnic_dev_del_addr(enic->vdev, mac_addr))
dev_err(enic, "del mac addr failed\n");
}
@@ -209,15 +208,6 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
return;
}
- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
- if (err) {
- dev_err(enic, "del mac addr failed\n");
- return;
- }
-
- ether_addr_copy((struct ether_addr *)mac_addr,
- (struct ether_addr *)enic->mac_addr);
-
err = vnic_dev_add_addr(enic->vdev, mac_addr);
if (err) {
dev_err(enic, "add mac addr failed\n");
@@ -244,14 +234,14 @@ void enic_init_vnic_resources(struct enic *enic)
struct vnic_rq *data_rq;
for (index = 0; index < enic->rq_count; index++) {
- cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+ cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
- vnic_rq_init(&enic->rq[enic_sop_rq(index)],
+ vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
cq_idx,
error_interrupt_enable,
error_interrupt_offset);
- data_rq = &enic->rq[enic_data_rq(index)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
if (data_rq->in_use)
vnic_rq_init(data_rq,
cq_idx,
@@ -414,14 +404,32 @@ enic_free_consistent(void *priv,
rte_free(mze);
}
+int enic_link_update(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int ret;
+ int link_status = 0;
+
+ link_status = enic_get_link_status(enic);
+ ret = (link_status == enic->link_status);
+ enic->link_status = link_status;
+ eth_dev->data->dev_link.link_status = link_status;
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ return ret;
+}
+
static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
void *arg)
{
- struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+ struct enic *enic = pmd_priv(dev);
vnic_intr_return_all_credits(&enic->intr);
+ enic_link_update(enic);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
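Note: with enic_link_update() factored out and the interrupt handler raising RTE_ETH_EVENT_INTR_LSC, applications can react to link changes through the standard ethdev callback. A sketch of the application side, assuming the DPDK 17.02-era callback signature and a port configured with intr_conf.lsc = 1:

#include <stdio.h>
#include <rte_ethdev.h>

static void on_link_change(uint8_t port_id, enum rte_eth_event_type type,
			   void *cb_arg)
{
	struct rte_eth_link link;

	(void)cb_arg;
	if (type != RTE_ETH_EVENT_INTR_LSC)
		return;
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link is %s\n", port_id,
	       link.link_status ? "up" : "down");
}

/* registered once after rte_eth_dev_configure():
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               on_link_change, NULL);
 */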
@@ -433,7 +441,13 @@ int enic_enable(struct enic *enic)
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+ /* vnic notification of link status has already been turned on in
+ * enic_dev_init() which is called during probe time. Here we are
+ * just turning on interrupt vector 0 if needed.
+ */
+ if (eth_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, 0);
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
@@ -441,17 +455,17 @@ int enic_enable(struct enic *enic)
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
if (err) {
dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
return err;
}
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_data_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
if (err) {
/* release the allocated mbufs for the sop rq*/
enic_rxmbuf_queue_release(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
dev_err(enic, "Failed to alloc data RX queue mbufs\n");
return err;
@@ -520,7 +534,10 @@ void enic_free_rq(void *rxq)
if (rq_data->in_use)
vnic_rq_free(rq_data);
- vnic_cq_free(&enic->cq[rq_sop->index]);
+ vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
+
+ rq_sop->in_use = 0;
+ rq_data->in_use = 0;
}
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -545,8 +562,10 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
if (rq_data->in_use)
@@ -560,8 +579,10 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
int ret1 = 0, ret2 = 0;
struct rte_eth_dev *eth_dev = enic->rte_dev;
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
ret2 = vnic_rq_disable(rq_sop);
rte_mb();
@@ -579,16 +600,17 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc)
+ uint16_t nb_desc, uint16_t free_thresh)
{
int rc;
- uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
- uint16_t data_queue_idx = enic_data_rq(queue_idx);
+ uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+ uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
+ uint16_t mtu = enic->rte_dev->data->mtu;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -599,14 +621,18 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
rq_data->socket_id = socket_id;
rq_data->mp = mp;
rq_sop->in_use = 1;
+ rq_sop->rx_free_thresh = free_thresh;
+ rq_data->rx_free_thresh = free_thresh;
+ dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+ free_thresh);
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
- dev_info(enic, "Scatter rx mode enabled\n");
+ dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+ mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
(mbuf_size - 1)) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
@@ -614,10 +640,13 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
if (mbufs_per_pkt > 1) {
- dev_info(enic, "Scatter rx mode in use\n");
+ dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
+ rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
} else {
- dev_info(enic, "Scatter rx mode not being used\n");
+ dev_info(enic, "Rq %u Scatter rx mode not being used\n",
+ queue_idx);
+ rq_sop->data_queue_enable = 0;
rq_data->in_use = 0;
}
@@ -654,7 +683,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- enic->config.mtu, mbuf_size, min_sop + min_data,
+ mtu, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -705,6 +734,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
+
return 0;
err_free_sop_mbuf:
@@ -801,6 +832,10 @@ int enic_disable(struct enic *enic)
vnic_intr_mask(&enic->intr);
(void)vnic_intr_masked(&enic->intr); /* flush write */
+ rte_intr_disable(&enic->pdev->intr_handle);
+ rte_intr_callback_unregister(&enic->pdev->intr_handle,
+ enic_intr_handler,
+ (void *)enic->rte_dev);
vnic_dev_disable(enic->vdev);
@@ -822,8 +857,14 @@ int enic_disable(struct enic *enic)
}
}
+ /* If we were using interrupts, set the interrupt vector to -1
+ * to disable interrupts. We are not disabling link notifications,
+ * though, as we want the polling of link status to continue working.
+ */
+ if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, -1);
+
vnic_dev_set_reset_flag(enic->vdev, 1);
- vnic_dev_notify_unset(enic->vdev);
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
@@ -925,7 +966,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
for (i = 0; i < (1 << rss_hash_bits); i++)
(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_sop_rq(i % enic->rq_count);
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -1025,6 +1066,9 @@ static void enic_dev_deinit(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ /* stop link status checking */
+ vnic_dev_notify_unset(enic->vdev);
+
rte_free(eth_dev->data->mac_addrs);
}
@@ -1066,6 +1110,56 @@ int enic_set_vnic_res(struct enic *enic)
return rc;
}
+/* Initialize the completion queue for an RQ */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+ struct vnic_rq *sop_rq, *data_rq;
+ unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ int rc = 0;
+
+ sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+
+ vnic_cq_clean(&enic->cq[cq_idx]);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
+
+
+ vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
+ enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
+ sop_rq->ring.desc_count - 1, 1, 0);
+ if (data_rq->in_use) {
+ vnic_rq_init_start(data_rq,
+ enic_cq_rq(enic,
+ enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
+ data_rq->ring.desc_count - 1, 1, 0);
+ }
+
+ rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+ if (rc)
+ return rc;
+
+ if (data_rq->in_use) {
+ rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+ if (rc) {
+ enic_rxmbuf_queue_release(enic, sop_rq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/* The Cisco NIC can send and receive packets up to a max packet size
* determined by the NIC type and firmware. There is also an MTU
* configured into the NIC via the CIMC/UCSM management interface
@@ -1075,6 +1169,9 @@ int enic_set_vnic_res(struct enic *enic)
*/
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
+ unsigned int rq_idx;
+ struct vnic_rq *rq;
+ int rc = 0;
uint16_t old_mtu; /* previous setting */
uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
struct rte_eth_dev *eth_dev = enic->rte_dev;
@@ -1082,10 +1179,6 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
- /* only works with Rx scatter disabled */
- if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
- return -ENOTSUP;
-
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1103,11 +1196,83 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
+ /* The easy case is when scatter is disabled. However if the MTU
+ * becomes greater than the mbuf data size, packet drops will ensue.
+ */
+ if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ eth_dev->data->mtu = new_mtu;
+ goto set_mtu_done;
+ }
+
+ /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
+ * change Rx scatter mode if necessary for better performance. I.e. if
+ * MTU was greater than the mbuf size and now it's less, scatter Rx
+ * doesn't have to be used and vice versa.
+ */
+ rte_spinlock_lock(&enic->mtu_lock);
+
+ /* Stop traffic on all RQs */
+ for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+ rq = &enic->rq[rq_idx];
+ if (rq->is_sop && rq->in_use) {
+ rc = enic_stop_rq(enic,
+ enic_sop_rq_idx_to_rte_idx(rq_idx));
+ if (rc) {
+ dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+ goto set_mtu_done;
+ }
+ }
+ }
+
+ /* replace Rx function with a no-op to avoid getting stale pkts */
+ eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+ rte_mb();
+
+ /* Allow time for threads to exit the real Rx function. */
+ usleep(100000);
+
+ /* now it is safe to reconfigure the RQs */
+
/* update the mtu */
eth_dev->data->mtu = new_mtu;
+ /* free and reallocate RQs with the new MTU */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+
+ enic_free_rq(rq);
+ rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+ rq->tot_nb_desc, rq->rx_free_thresh);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU alloc error- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+
+ rc = enic_reinit_rq(enic, rq_idx);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU RQ reinit- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+ }
+
+ /* put back the real receive function */
+ rte_mb();
+ eth_dev->rx_pkt_burst = enic_recv_pkts;
+ rte_mb();
+
+ /* restart Rx traffic */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (rq->is_sop && rq->in_use)
+ enic_start_rq(enic, rq_idx);
+ }
+
+set_mtu_done:
dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
- return 0;
+ rte_spinlock_unlock(&enic->mtu_lock);
+ return rc;
}
static int enic_dev_init(struct enic *enic)
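Note: the MTU path above quiesces the datapath by swapping in enic_dummy_recv_pkts, issuing a full barrier, and sleeping before reconfiguring the RQs; the 100 ms usleep() is a grace period for in-flight receive bursts, not a hard synchronization guarantee. The pattern in isolation, as a sketch with hypothetical names:

#include <unistd.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t noop_rx_burst(void *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	(void)rxq; (void)pkts; (void)n;
	return 0;                  /* deliver nothing while reconfiguring */
}

static void quiesce_rx(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = noop_rx_burst;
	rte_mb();                  /* publish the swap to all lcores */
	usleep(100000);            /* let in-flight bursts drain */
}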
@@ -1125,24 +1290,32 @@ static int enic_dev_init(struct enic *enic)
return err;
}
+ /* Get available resource counts */
+ enic_get_res_counts(enic);
+ if (enic->conf_rq_count == 1) {
+ dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
+ dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
+ dev_err(enic, "See the ENIC PMD guide for more information.\n");
+ return -EINVAL;
+ }
+
/* Get the supported filters */
enic_fdir_info(enic);
- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
+ * ENIC_MAX_MAC_ADDR, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
return -1;
}
ether_addr_copy((struct ether_addr *) enic->mac_addr,
- &eth_dev->data->mac_addrs[0]);
-
-
- /* Get available resource counts
- */
- enic_get_res_counts(enic);
+ eth_dev->data->mac_addrs);
vnic_dev_set_reset_flag(enic->vdev, 0);
+ /* set up link status checking */
+ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
return 0;
}
diff --git a/src/dpdk/drivers/net/enic/enic_res.c b/src/dpdk/drivers/net/enic/enic_res.c
index 140c6efb..8a230a16 100644
--- a/src/dpdk/drivers/net/enic/enic_res.c
+++ b/src/dpdk/drivers/net/enic/enic_res.c
@@ -89,11 +89,10 @@ int enic_get_vnic_config(struct enic *enic)
/* max packet size is only defined in newer VIC firmware
* and will be 0 for legacy firmware and VICs
*/
- if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
+ if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE)
enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
else
- enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
- - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4);
if (c->mtu == 0)
c->mtu = 1500;
diff --git a/src/dpdk/drivers/net/enic/enic_res.h b/src/dpdk/drivers/net/enic/enic_res.h
index cda2da1e..303530ef 100644
--- a/src/dpdk/drivers/net/enic/enic_res.h
+++ b/src/dpdk/drivers/net/enic/enic_res.h
@@ -48,11 +48,7 @@
#define ENIC_MIN_MTU 68
/* Does not include (possible) inserted VLAN tag and FCS */
-#define ENIC_DEFAULT_RX_MAX_PKT_SIZE 9022
-
-/* Does not include (possible) inserted VLAN tag and FCS */
-#define ENIC_TX_MAX_PKT_SIZE 9208
-
+#define ENIC_DEFAULT_MAX_PKT_SIZE 9022
#define ENIC_MULTICAST_PERFECT_FILTERS 32
#define ENIC_UNICAST_PERFECT_FILTERS 32
diff --git a/src/dpdk/drivers/net/enic/enic_rxtx.c b/src/dpdk/drivers/net/enic/enic_rxtx.c
index cd155024..26b83ae9 100644
--- a/src/dpdk/drivers/net/enic/enic_rxtx.c
+++ b/src/dpdk/drivers/net/enic/enic_rxtx.c
@@ -37,6 +37,9 @@
#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
#define RTE_PMD_USE_PREFETCH
@@ -129,6 +132,60 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
+/* Find the offset to L5. This is needed by enic TSO implementation.
+ * Return 0 if not a TCP packet or can't figure out the length.
+ */
+static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eh;
+ struct vlan_hdr *vh;
+ struct ipv4_hdr *ip4;
+ struct ipv6_hdr *ip6;
+ struct tcp_hdr *th;
+ uint8_t hdr_len;
+ uint16_t ether_type;
+
+ /* offset past Ethernet header */
+ eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ ether_type = eh->ether_type;
+ hdr_len = sizeof(struct ether_hdr);
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
+ ether_type = vh->eth_proto;
+ hdr_len += sizeof(struct vlan_hdr);
+ }
+
+ /* offset past IP header */
+ switch (rte_be_to_cpu_16(ether_type)) {
+ case ETHER_TYPE_IPv4:
+ ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
+ if (ip4->next_proto_id != IPPROTO_TCP)
+ return 0;
+ hdr_len += (ip4->version_ihl & 0xf) * 4;
+ break;
+ case ETHER_TYPE_IPv6:
+ ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
+ if (ip6->proto != IPPROTO_TCP)
+ return 0;
+ hdr_len += sizeof(struct ipv6_hdr);
+ break;
+ default:
+ return 0;
+ }
+
+ if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
+ return 0;
+
+ /* offset past TCP header */
+ th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
+ hdr_len += (th->data_off >> 4) * 4;
+
+ if (hdr_len > mbuf->pkt_len)
+ return 0;
+
+ return hdr_len;
+}
+
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
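Note: tso_header_len() above walks Ethernet -> optional VLAN -> IP -> TCP, relying on the fact that IPv4 IHL and the TCP data offset both count 32-bit words. Worked example for an untagged IPv4/TCP packet with no options:

/* version_ihl = 0x45 -> IHL = 5 words = 20 bytes
 * data_off    = 0x50 -> offset = 5 words = 20 bytes */
unsigned int hdr_len = 14;          /* sizeof(struct ether_hdr) */
hdr_len += (0x45 & 0xf) * 4;        /* + 20 -> 34 */
hdr_len += (0x50 >> 4) * 4;         /* + 20 -> 54 bytes to the L5 payload */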
@@ -149,30 +206,18 @@ enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
uint8_t cqrd_flags = cqrd->flags;
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
- [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
+ [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
@@ -185,9 +230,10 @@ static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t ciflags, bwflags, pkt_flags = 0;
+ uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
ciflags = enic_cq_rx_desc_ciflags(cqrd);
bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ vlan_tci = enic_cq_rx_desc_vlan(cqrd);
mbuf->ol_flags = 0;
@@ -195,13 +241,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
goto mbuf_flags_done;
- /* VLAN stripping */
+ /* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
- mbuf->vlan_tci = 0;
+ if (vlan_tci != 0)
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
}
+ mbuf->vlan_tci = vlan_tci;
/* RSS flag */
if (enic_cq_rx_desc_rss_type(cqrd)) {
@@ -210,13 +260,25 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
/* checksum flags */
- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_csum_not_calc(cqrd))
+ pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ else {
+ uint32_t l4_flags;
+ l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
}
}
@@ -224,6 +286,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
mbuf->ol_flags = pkt_flags;
}
+/* dummy receive function to replace actual function in
+ * order to do safe reconfiguration operations.
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -373,10 +446,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_mb();
if (data_rq->in_use)
- iowrite32(data_rq->posted_index,
- &data_rq->ctrl->posted_index);
+ iowrite32_relaxed(data_rq->posted_index,
+ &data_rq->ctrl->posted_index);
rte_compiler_barrier();
- iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
+ iowrite32_relaxed(sop_rq->posted_index,
+ &sop_rq->ctrl->posted_index);
}
@@ -459,6 +533,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint8_t vlan_tag_insert;
uint8_t eop;
uint64_t bus_addr;
+ uint8_t offload_mode;
+ uint16_t header_len;
enic_cleanup_wq(enic, wq);
wq_desc_avail = vnic_wq_desc_avail(wq);
@@ -470,23 +546,16 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (index = 0; index < nb_pkts; index++) {
tx_pkt = *tx_pkts++;
- pkt_len = tx_pkt->pkt_len;
- data_len = tx_pkt->data_len;
- ol_flags = tx_pkt->ol_flags;
nb_segs = tx_pkt->nb_segs;
-
- if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
- rte_pktmbuf_free(tx_pkt);
- rte_atomic64_inc(&enic->soft_stats.tx_oversized);
- continue;
- }
-
if (nb_segs > wq_desc_avail) {
if (index > 0)
goto post;
goto done;
}
+ pkt_len = tx_pkt->pkt_len;
+ data_len = tx_pkt->data_len;
+ ol_flags = tx_pkt->ol_flags;
mss = 0;
vlan_id = 0;
vlan_tag_insert = 0;
@@ -497,13 +566,17 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
desc_p = descs + head_idx;
eop = (data_len == pkt_len);
-
- if (ol_flags & ol_flags_mask) {
- if (ol_flags & PKT_TX_VLAN_PKT) {
- vlan_tag_insert = 1;
- vlan_id = tx_pkt->vlan_tci;
+ offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+ header_len = 0;
+
+ if (tx_pkt->tso_segsz) {
+ header_len = tso_header_len(tx_pkt);
+ if (header_len) {
+ offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+ mss = tx_pkt->tso_segsz;
}
-
+ }
+ if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
if (ol_flags & PKT_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
@@ -516,8 +589,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
- eop, 0, vlan_tag_insert, vlan_id, 0);
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ vlan_tag_insert = 1;
+ vlan_id = tx_pkt->vlan_tci;
+ }
+
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+ offload_mode, eop, eop, 0, vlan_tag_insert,
+ vlan_id, 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -537,8 +616,9 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, 0, eop, eop, 0,
- vlan_tag_insert, vlan_id, 0);
+ mss, 0, offload_mode, eop, eop,
+ 0, vlan_tag_insert, vlan_id,
+ 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -550,7 +630,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
post:
rte_wmb();
- iowrite32(head_idx, &wq->ctrl->posted_index);
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
wq->ring.desc_avail = wq_desc_avail;
wq->head_idx = head_idx;
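Note: with the Tx changes above, the driver selects WQ_ENET_OFFLOAD_MODE_TSO whenever the mbuf carries a non-zero tso_segsz and the header parses as TCP, falling back to checksum mode otherwise. A sketch of how a sender hands such a packet to this path, assuming the mbuf already holds a complete Ethernet/IPv4/TCP header:

#include <rte_mbuf.h>

static void request_tso(struct rte_mbuf *mbuf)
{
	/* the driver re-derives the L2..L4 header length itself via
	 * tso_header_len(), so only the per-segment MSS is needed here */
	mbuf->tso_segsz = 1448;
}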