Diffstat (limited to 'drivers/net/enic')
-rw-r--r--  drivers/net/enic/Makefile             |    2
-rw-r--r--  drivers/net/enic/base/enic_vnic_wq.h  |   79
-rw-r--r--  drivers/net/enic/base/rq_enet_desc.h  |    2
-rw-r--r--  drivers/net/enic/base/vnic_cq.h       |   44
-rw-r--r--  drivers/net/enic/base/vnic_dev.c      |   14
-rw-r--r--  drivers/net/enic/base/vnic_dev.h      |    2
-rw-r--r--  drivers/net/enic/base/vnic_enet.h     |   17
-rw-r--r--  drivers/net/enic/base/vnic_rq.c       |    8
-rw-r--r--  drivers/net/enic/base/vnic_rq.h       |   18
-rw-r--r--  drivers/net/enic/base/vnic_wq.c       |   80
-rw-r--r--  drivers/net/enic/base/vnic_wq.h       |  118
-rw-r--r--  drivers/net/enic/enic.h               |   94
-rw-r--r--  drivers/net/enic/enic_clsf.c          |   16
-rw-r--r--  drivers/net/enic/enic_ethdev.c        |   90
-rw-r--r--  drivers/net/enic/enic_main.c          |  592
-rw-r--r--  drivers/net/enic/enic_res.c           |   30
-rw-r--r--  drivers/net/enic/enic_res.h           |   84
-rw-r--r--  drivers/net/enic/enic_rx.c            |  359
-rw-r--r--  drivers/net/enic/enic_rxtx.c          |  547
19 files changed, 1178 insertions, 1018 deletions
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index f3162741..3926b795 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
 #
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
deleted file mode 100644
index b0191093..00000000
--- a/drivers/net/enic/base/enic_vnic_wq.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2015, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _ENIC_VNIC_WQ_H_
-#define _ENIC_VNIC_WQ_H_
-
-#include "vnic_dev.h"
-#include "vnic_cq.h"
-
-static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
-{
-    struct vnic_wq_buf *buf = wq->to_use;
-
-    /* Adding write memory barrier prevents compiler and/or CPU
-     * reordering, thus avoiding descriptor posting before
-     * descriptor is initialized. Otherwise, hardware can read
-     * stale descriptor fields.
-     */
-    wmb();
-    iowrite32(buf->index, &wq->ctrl->posted_index);
-}
-
-static inline void enic_vnic_post_wq(struct vnic_wq *wq,
-                                     void *os_buf, dma_addr_t dma_addr,
-                                     unsigned int len, int sop,
-                                     uint8_t desc_skip_cnt, uint8_t cq_entry,
-                                     uint8_t compressed_send, uint64_t wrid)
-{
-    struct vnic_wq_buf *buf = wq->to_use;
-
-    buf->sop = sop;
-    buf->cq_entry = cq_entry;
-    buf->compressed_send = compressed_send;
-    buf->desc_skip_cnt = desc_skip_cnt;
-    buf->os_buf = os_buf;
-    buf->dma_addr = dma_addr;
-    buf->len = len;
-    buf->wr_id = wrid;
-
-    buf = buf->next;
-    wq->ring.desc_avail -= desc_skip_cnt;
-    wq->to_use = buf;
-
-    if (cq_entry)
-        enic_vnic_post_wq_index(wq);
-}
-
-#endif /* _ENIC_VNIC_WQ_H_ */
diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
index 7292d9dc..13e24b43 100644
--- a/drivers/net/enic/base/rq_enet_desc.h
+++ b/drivers/net/enic/base/rq_enet_desc.h
@@ -55,7 +55,7 @@ enum rq_enet_type_types {
 #define RQ_ENET_TYPE_BITS 2
 #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
 
-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
 	u64 address, u8 type, u16 length)
 {
 	desc->address = cpu_to_le64(address);
diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
index 922391b3..13ab87ca 100644
--- a/drivers/net/enic/base/vnic_cq.h
+++ b/drivers/net/enic/base/vnic_cq.h
@@ -90,50 +90,6 @@ struct vnic_cq {
 #endif
 };
 
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
-    unsigned int work_to_do,
-    int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-    u8 type, u16 q_number, u16 completed_index, void *opaque),
-    void *opaque)
-{
-    struct cq_desc *cq_desc;
-    unsigned int work_done = 0;
-    u16 q_number, completed_index;
-    u8 type, color;
-    struct rte_mbuf **rx_pkts = opaque;
-    unsigned int ret;
-
-    cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-        cq->ring.desc_size * cq->to_clean);
-    cq_desc_dec(cq_desc, &type, &color,
-        &q_number, &completed_index);
-
-    while (color != cq->last_color) {
-        if (opaque)
-            opaque = (void *)&(rx_pkts[work_done]);
-
-        ret = (*q_service)(cq->vdev, cq_desc, type,
-            q_number, completed_index, opaque);
-        cq->to_clean++;
-        if (cq->to_clean == cq->ring.desc_count) {
-            cq->to_clean = 0;
-            cq->last_color = cq->last_color ? 0 : 1;
-        }
-
-        cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-            cq->ring.desc_size * cq->to_clean);
-        cq_desc_dec(cq_desc, &type, &color,
-            &q_number, &completed_index);
-
-        if (ret)
-            work_done++;
-        if (work_done >= work_to_do)
-            break;
-    }
-
-    return work_done;
-}
-
 void vnic_cq_free(struct vnic_cq *cq);
 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
 	unsigned int socket_id,
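The removed vnic_cq_service captures the completion-queue convention the driver relies on: the NIC writes a "color" bit into every completion descriptor and flips the color it writes once per pass over the ring, so software detects new entries by comparing against its own last_color instead of reading a hardware index. A minimal sketch of that polling pattern, distilled from the code deleted above (not the PMD's new Rx loop, which lives in enic_rxtx.c):

    /* Poll one CQ entry at cq->to_clean; a color mismatch means the
     * entry has not been (re)written by hardware on this pass yet. */
    struct cq_desc *desc = (struct cq_desc *)((u8 *)cq->ring.descs +
                            cq->ring.desc_size * cq->to_clean);
    u8 type, color;
    u16 q_number, completed_index;

    cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
    while (color != cq->last_color) {
        /* ... hand the completion to a queue-specific handler ... */
        if (++cq->to_clean == cq->ring.desc_count) {
            cq->to_clean = 0;
            cq->last_color ^= 1;    /* the expected color flips per wrap */
        }
        desc = (struct cq_desc *)((u8 *)cq->ring.descs +
                cq->ring.desc_size * cq->to_clean);
        cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
    }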
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index e8a50287..fc2e4cc3 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -83,7 +83,7 @@ struct vnic_dev {
 	struct vnic_intr_coal_timer_info intr_coal_timer_info;
 	void *(*alloc_consistent)(void *priv, size_t size,
 		dma_addr_t *dma_handle, u8 *name);
-	void (*free_consistent)(struct rte_pci_device *hwdev,
+	void (*free_consistent)(void *priv,
 		size_t size, void *vaddr,
 		dma_addr_t dma_handle);
 };
@@ -101,7 +101,7 @@ void *vnic_dev_priv(struct vnic_dev *vdev)
 void vnic_register_cbacks(struct vnic_dev *vdev,
 	void *(*alloc_consistent)(void *priv, size_t size,
 		dma_addr_t *dma_handle, u8 *name),
-	void (*free_consistent)(struct rte_pci_device *hwdev,
+	void (*free_consistent)(void *priv,
 		size_t size, void *vaddr,
 		dma_addr_t dma_handle))
 {
@@ -807,7 +807,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
 int vnic_dev_notify_unset(struct vnic_dev *vdev)
 {
 	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
-		vdev->free_consistent(vdev->pdev,
+		vdev->free_consistent(vdev->priv,
 			sizeof(struct vnic_devcmd_notify),
 			vdev->notify,
 			vdev->notify_pa);
@@ -924,16 +924,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 {
 	if (vdev) {
 		if (vdev->notify)
-			vdev->free_consistent(vdev->pdev,
+			vdev->free_consistent(vdev->priv,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->stats)
-			vdev->free_consistent(vdev->pdev,
+			vdev->free_consistent(vdev->priv,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			vdev->free_consistent(vdev->pdev,
+			vdev->free_consistent(vdev->priv,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
 		kfree(vdev);
@@ -1041,7 +1041,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
 		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
 		*entry = (u16)a0;
-		vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
 	} else if (cmd == CLSF_DEL) {
 		a0 = *entry;
 		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 113d6acc..689442f3 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -102,7 +102,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
 void vnic_register_cbacks(struct vnic_dev *vdev,
 	void *(*alloc_consistent)(void *priv, size_t size,
 		dma_addr_t *dma_handle, u8 *name),
-	void (*free_consistent)(struct rte_pci_device *hwdev,
+	void (*free_consistent)(void *priv,
 		size_t size, void *vaddr,
 		dma_addr_t dma_handle));
 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
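The free_consistent hook now takes the same opaque priv pointer as alloc_consistent instead of a PCI device, so the shared vNIC code stays OS-agnostic and the freeing side can reach the PMD's own bookkeeping. A sketch of the registration, based on the call that appears in enic_probe later in this diff:

    /* Both hooks receive the enic private struct, so enic_free_consistent
     * can find the memzone that enic_alloc_consistent recorded for this
     * (vaddr, dma_handle) pair. */
    vnic_register_cbacks(enic->vdev,
                         enic_alloc_consistent,  /* rte_memzone_reserve_aligned + track */
                         enic_free_consistent);  /* look up in memzone_list, then free */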
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index cc34998f..50622479 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -35,6 +35,10 @@
 #ifndef _VNIC_ENIC_H_
 #define _VNIC_ENIC_H_
 
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2)
+
 /* Device-specific region: enet configuration */
 struct vnic_enet_config {
 	u32 flags;
@@ -50,6 +54,12 @@ struct vnic_enet_config {
 	u16 vf_rq_count;
 	u16 num_arfs;
 	u64 mem_paddr;
+	u16 rdma_qp_id;
+	u16 rdma_qp_count;
+	u16 rdma_resgrp;
+	u32 rdma_mr_id;
+	u32 rdma_mr_count;
+	u32 max_pkt_size;
 };
 
 #define VENETF_TSO		0x1	/* TSO enabled */
@@ -64,9 +74,14 @@ struct vnic_enet_config {
 #define VENETF_RSSHASH_IPV6_EX	0x200	/* Hash on IPv6 extended fields */
 #define VENETF_RSSHASH_TCPIPV6_EX	0x400	/* Hash on TCP + IPv6 ext. fields */
 #define VENETF_LOOP		0x800	/* Loopback enabled */
-#define VENETF_VMQ		0x4000	/* using VMQ flag for VMware NETQ */
+#define VENETF_FAILOVER		0x1000	/* Fabric failover enabled */
+#define VENETF_USPACE_NIC	0x2000	/* vHPC enabled */
+#define VENETF_VMQ		0x4000	/* VMQ enabled */
+#define VENETF_ARFS		0x8000	/* ARFS enabled */
 #define VENETF_VXLAN		0x10000	/* VxLAN offload */
 #define VENETF_NVGRE		0x20000	/* NVGRE offload */
+#define VENETF_GRPINTR		0x40000	/* group interrupt */
+
 #define VENET_INTR_TYPE_MIN	0	/* Timer specs min interrupt spacing */
 #define VENET_INTR_TYPE_IDLE	1	/* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index cb62c5e5..0e700a12 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -84,11 +84,12 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
 	iowrite32(cq_index, &rq->ctrl->cq_index);
 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
-	iowrite32(0, &rq->ctrl->dropped_packet_count);
 	iowrite32(0, &rq->ctrl->error_status);
 	iowrite32(fetch_index, &rq->ctrl->fetch_index);
 	iowrite32(posted_index, &rq->ctrl->posted_index);
-
+	if (rq->is_sop)
+		iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+			  &rq->ctrl->data_ring);
 }
 
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -96,6 +97,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 	unsigned int error_interrupt_offset)
 {
 	u32 fetch_index = 0;
+
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
 
@@ -110,6 +112,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 		error_interrupt_offset);
 	rq->rxst_idx = 0;
 	rq->tot_pkts = 0;
+	rq->pkt_first_seg = NULL;
+	rq->pkt_last_seg = NULL;
 }
 
 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index e083ccc2..fd9e1704 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {
 	u32 pad7;
 	u32 error_status;		/* 0x48 */
 	u32 pad8;
-	u32 dropped_packet_count;	/* 0x50 */
+	u32 tcp_sn;			/* 0x50 */
 	u32 pad9;
-	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 unused;			/* 0x58 */
 	u32 pad10;
+	u32 dca_select;			/* 0x60 */
+	u32 pad11;
+	u32 dca_value;			/* 0x68 */
+	u32 pad12;
+	u32 data_ring;			/* 0x70 */
+	u32 pad13;
+	u32 header_split;		/* 0x78 */
+	u32 pad14;
 };
 
 struct vnic_rq {
@@ -82,6 +90,12 @@ struct vnic_rq {
 	struct rte_mempool *mp;
 	uint16_t rxst_idx;
 	uint32_t tot_pkts;
+	uint16_t data_queue_idx;
+	uint8_t is_sop;
+	uint8_t in_use;
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+	unsigned int max_mbufs_per_pkt;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
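These vnic_rq additions are the plumbing for scattered Rx: each application RQ is backed by two VIC RQs, a start-of-packet (SOP) queue that receives the first mbuf of every packet and a data queue for continuation mbufs, and vnic_rq_init_start wires the SOP queue to its companion through the new data_ring register. The index mapping is fixed by helpers added to enic.h later in this diff; a small worked example:

    /* App queue i maps to a (SOP, data) pair; both share one CQ. */
    static inline unsigned int enic_sop_rq(unsigned int rq)  { return rq * 2; }
    static inline unsigned int enic_data_rq(unsigned int rq) { return rq * 2 + 1; }
    /* e.g. app RQ 3 -> SOP RQ 6 and data RQ 7, both completing on CQ 3
     * (enic_cq_rq() returns rq / 2). */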
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index a3ef4170..9b9ff4d7 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
 
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
-	struct vnic_wq_buf *buf;
-	unsigned int i, j, count = wq->ring.desc_count;
-	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
-
-	for (i = 0; i < blks; i++) {
-		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
-		if (!wq->bufs[i])
-			return -ENOMEM;
-	}
-
-	for (i = 0; i < blks; i++) {
-		buf = wq->bufs[i];
-		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
-			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
-			buf->desc = (u8 *)wq->ring.descs +
-				wq->ring.desc_size * buf->index;
-			if (buf->index + 1 == count) {
-				buf->next = wq->bufs[0];
-				break;
-			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
-				buf->next = wq->bufs[i + 1];
-			} else {
-				buf->next = buf + 1;
-				buf++;
-			}
-		}
-	}
-
-	wq->to_use = wq->to_clean = wq->bufs[0];
-
+	unsigned int count = wq->ring.desc_count;
+	/* Allocate the mbuf ring */
+	wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
+		    sizeof(struct vnic_wq_buf) * count,
+		    RTE_CACHE_LINE_SIZE, wq->socket_id);
+	wq->head_idx = 0;
+	wq->tail_idx = 0;
+	if (wq->bufs == NULL)
+		return -ENOMEM;
 	return 0;
 }
 
 void vnic_wq_free(struct vnic_wq *wq)
 {
 	struct vnic_dev *vdev;
-	unsigned int i;
 
 	vdev = wq->vdev;
 
 	vnic_dev_free_desc_ring(vdev, &wq->ring);
 
-	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
-		if (wq->bufs[i]) {
-			kfree(wq->bufs[i]);
-			wq->bufs[i] = NULL;
-		}
-	}
-
+	rte_free(wq->bufs);
 	wq->ctrl = NULL;
 }
 
-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-	unsigned int desc_size)
-{
-	int mem_size = 0;
-
-	mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
-
-	mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
-		VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
-
-	return mem_size;
-}
-
 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
 	unsigned int desc_count, unsigned int desc_size)
@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
 	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
 	iowrite32(0, &wq->ctrl->error_status);
 
-	wq->to_use = wq->to_clean =
-		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
-		[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
+	wq->head_idx = fetch_index;
+	wq->tail_idx = wq->head_idx;
 }
 
 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
 	vnic_wq_init_start(wq, cq_index, 0, 0,
 		error_interrupt_enable,
 		error_interrupt_offset);
+	wq->last_completed_index = 0;
 }
 
 void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
@@ -220,21 +179,24 @@ int vnic_wq_disable(struct vnic_wq *wq)
 }
 
 void vnic_wq_clean(struct vnic_wq *wq,
-	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+		   void (*buf_clean)(struct vnic_wq_buf *buf))
 {
 	struct vnic_wq_buf *buf;
+	unsigned int to_clean = wq->tail_idx;
 
-	buf = wq->to_clean;
+	buf = &wq->bufs[to_clean];
 
 	while (vnic_wq_desc_used(wq) > 0) {
 
-		(*buf_clean)(wq, buf);
+		(*buf_clean)(buf);
+		to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
 
-		buf = wq->to_clean = buf->next;
+		buf = &wq->bufs[to_clean];
 		wq->ring.desc_avail++;
 	}
 
-	wq->to_use = wq->to_clean = wq->bufs[0];
+	wq->head_idx = 0;
+	wq->tail_idx = 0;
 
 	iowrite32(0, &wq->ctrl->fetch_index);
 	iowrite32(0, &wq->ctrl->posted_index);
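The WQ bookkeeping moves from chained blocks of vnic_wq_buf to one flat, cache-aligned array indexed by head/tail counters: posting advances head_idx, completion cleanup advances tail_idx, and one slot stays unused so "full" and "empty" are distinguishable. An illustrative helper (not part of the patch) showing the occupancy invariant the new scheme maintains:

    /* Ring occupancy from head/tail, assuming one reserved slot;
     * this tracks what vnic_wq_desc_used() reports via desc_avail. */
    static inline unsigned int ring_used(unsigned int head,
                                         unsigned int tail,
                                         unsigned int count)
    {
        return (head >= tail) ? head - tail : count - tail + head;
    }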
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index c23de625..38a217f1 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -38,6 +38,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
+#include <rte_memzone.h>
 
 /* Work queue control */
 struct vnic_wq_ctrl {
@@ -64,42 +65,23 @@ struct vnic_wq_ctrl {
 	u32 pad9;
 };
 
+/* 16 bytes */
 struct vnic_wq_buf {
-	struct vnic_wq_buf *next;
-	dma_addr_t dma_addr;
-	void *os_buf;
-	unsigned int len;
-	unsigned int index;
-	int sop;
-	void *desc;
-	uint64_t wr_id; /* Cookie */
-	uint8_t cq_entry; /* Gets completion event from hw */
-	uint8_t desc_skip_cnt; /* Num descs to occupy */
-	uint8_t compressed_send; /* Both hdr and payload in one desc */
+	struct rte_mempool *pool;
+	void *mb;
 };
 
-/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
-#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
-	((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
-	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_WQ_BUF_BLK_SZ(entries) \
-	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
-#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
-
 struct vnic_wq {
 	unsigned int index;
 	struct vnic_dev *vdev;
 	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
 	struct vnic_dev_ring ring;
-	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
-	struct vnic_wq_buf *to_use;
-	struct vnic_wq_buf *to_clean;
-	unsigned int pkts_outstanding;
+	struct vnic_wq_buf *bufs;
+	unsigned int head_idx;
+	unsigned int tail_idx;
 	unsigned int socket_id;
+	const struct rte_memzone *cqmsg_rz;
+	uint16_t last_completed_index;
 };
 
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -114,11 +96,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
 	return wq->ring.desc_count - wq->ring.desc_avail - 1;
 }
 
-static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
-{
-	return wq->to_use->desc;
-}
-
 #define PI_LOG2_CACHE_LINE_SIZE 5
 #define PI_INDEX_BITS 12
 #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
@@ -191,73 +168,13 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
 		PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
 }
 
-static inline void vnic_wq_post(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr,
-	unsigned int len, int sop, int eop,
-	uint8_t desc_skip_cnt, uint8_t cq_entry,
-	uint8_t compressed_send, uint64_t wrid)
-{
-	struct vnic_wq_buf *buf = wq->to_use;
-
-	buf->sop = sop;
-	buf->cq_entry = cq_entry;
-	buf->compressed_send = compressed_send;
-	buf->desc_skip_cnt = desc_skip_cnt;
-	buf->os_buf = os_buf;
-	buf->dma_addr = dma_addr;
-	buf->len = len;
-	buf->wr_id = wrid;
-
-	buf = buf->next;
-	if (eop) {
-#ifdef DO_PREFETCH
-		uint64_t wr = vnic_cached_posted_index(dma_addr, len,
-						       buf->index);
-#endif
-		/* Adding write memory barrier prevents compiler and/or CPU
-		 * reordering, thus avoiding descriptor posting before
-		 * descriptor is initialized. Otherwise, hardware can read
-		 * stale descriptor fields.
-		 */
-		wmb();
-#ifdef DO_PREFETCH
-		/* Intel chipsets seem to limit the rate of PIOs that we can
-		 * push on the bus. Thus, it is very important to do a single
-		 * 64 bit write here. With two 32-bit writes, my maximum
-		 * pkt/sec rate was cut almost in half. -AJF
-		 */
-		iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
-#else
-		iowrite32(buf->index, &wq->ctrl->posted_index);
-#endif
-	}
-	wq->to_use = buf;
-
-	wq->ring.desc_avail -= desc_skip_cnt;
-}
-
-static inline void vnic_wq_service(struct vnic_wq *wq,
-	struct cq_desc *cq_desc, u16 completed_index,
-	void (*buf_service)(struct vnic_wq *wq,
-	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
-	void *opaque)
+static inline uint32_t
+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
 {
-	struct vnic_wq_buf *buf;
-
-	buf = wq->to_clean;
-	while (1) {
-
-		(*buf_service)(wq, cq_desc, buf, opaque);
-
-		wq->ring.desc_avail++;
-
-		wq->to_clean = buf->next;
-
-		if (buf->index == completed_index)
-			break;
-
-		buf = wq->to_clean;
-	}
+	idx++;
+	if (unlikely(idx == n_descriptors))
+		idx = 0;
+	return idx;
 }
 
 void vnic_wq_free(struct vnic_wq *wq);
@@ -275,8 +192,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
 void vnic_wq_enable(struct vnic_wq *wq);
 int vnic_wq_disable(struct vnic_wq *wq);
 void vnic_wq_clean(struct vnic_wq *wq,
-	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-	unsigned int desc_size);
-
+		   void (*buf_clean)(struct vnic_wq_buf *buf));
 #endif /* _VNIC_WQ_H_ */
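Although vnic_wq_post is deleted, the posting discipline it documented still applies to whatever replaces it in enic_rxtx.c (not shown in full here): fill the descriptor in host memory, issue a write memory barrier, then write the new index to the posted_index doorbell so the NIC can never read a half-initialized descriptor. A hedged sketch of that sequence using only names visible in this diff:

    /* Illustrative posting sequence, not the PMD's literal Tx code: */
    wq_enet_desc_enc(desc, bus_addr, len, mss,
                     0 /* header_length */, offload_mode,
                     eop, cq_entry, 0 /* fcoe_encap */,
                     vlan_tag_insert, vlan_tag, 0 /* loopback */);
    head_idx = buf_idx_incr(wq->ring.desc_count, head_idx);
    wmb();                                   /* descriptor before doorbell */
    iowrite32(head_idx, &wq->ctrl->posted_index);

buf_idx_incr itself uses a predicted branch rather than a modulo, which avoids an integer division on every descriptor.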
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 8c914f5b..53fed0b8 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -46,6 +46,8 @@
 #include "vnic_rss.h"
 #include "enic_res.h"
 #include "cq_enet_desc.h"
+#include <sys/queue.h>
+#include <rte_spinlock.h>
 
 #define DRV_NAME		"enic_pmd"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
@@ -53,8 +55,11 @@
 #define DRV_COPYRIGHT		"Copyright 2008-2015 Cisco Systems, Inc"
 
 #define ENIC_WQ_MAX		8
-#define ENIC_RQ_MAX		8
-#define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
+/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
+ * RQs use the same CQ.
+ */
+#define ENIC_RQ_MAX		16
+#define ENIC_CQ_MAX		(ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
 
 #define VLAN_ETH_HLEN		18
@@ -62,7 +67,6 @@
 #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
 
 #define ENICPMD_BDF_LENGTH	13   /* 0000:00:00.0'\0' */
-#define PKT_TX_TCP_UDP_CKSUM	0x6000
 #define ENIC_CALC_IP_CKSUM	1
 #define ENIC_CALC_TCP_UDP_CKSUM	2
 #define ENIC_MAX_MTU		9000
@@ -91,6 +95,16 @@ struct enic_fdir {
 	struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
 };
 
+struct enic_soft_stats {
+	rte_atomic64_t rx_nombuf;
+	rte_atomic64_t rx_packet_errors;
+};
+
+struct enic_memzone_entry {
+	const struct rte_memzone *rz;
+	LIST_ENTRY(enic_memzone_entry) entries;
+};
+
 /* Per-instance private data structure */
 struct enic {
 	struct enic *next;
@@ -114,6 +128,7 @@ struct enic {
 	u8 ig_vlan_strip_en;
 	int link_status;
 	u8 hw_ip_checksum;
+	u16 max_mtu;
 
 	unsigned int flags;
 	unsigned int priv_flags;
@@ -133,11 +148,38 @@ struct enic {
 	/* interrupt resource */
 	struct vnic_intr intr;
 	unsigned int intr_count;
+
+	/* software counters */
+	struct enic_soft_stats soft_stats;
+
+	/* linked list storing memory allocations */
+	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
+	rte_spinlock_t memzone_list_lock;
+
 };
 
+static inline unsigned int enic_sop_rq(unsigned int rq)
+{
+	return rq * 2;
+}
+
+static inline unsigned int enic_data_rq(unsigned int rq)
+{
+	return rq * 2 + 1;
+}
+
+static inline unsigned int enic_vnic_rq_count(struct enic *enic)
+{
+	return enic->rq_count * 2;
+}
+
 static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic,
 	unsigned int rq)
 {
-	return rq;
+	/* Scatter rx uses two receive queues together with one
+	 * completion queue, so the completion queue number is no
+	 * longer the same as the rq number.
+	 */
+	return rq / 2;
 }
 
 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
@@ -155,14 +197,40 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
 	return (struct enic *)eth_dev->data->dev_private;
 }
 
-#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#define ASSERT(x) do {			\
-	if (!(x))			\
-		rte_panic("ENIC: x");	\
-} while (0)
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+	uint32_t d = i0 + i1;
+	d -= (d >= n_descriptors) ? n_descriptors : 0;
+	return d;
+}
+
+static inline uint32_t
+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+	int32_t d = i1 - i0;
+	return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
+}
+
+static inline uint32_t
+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
+{
+	idx++;
+	if (unlikely(idx == n_descriptors))
+		idx = 0;
+	return idx;
+}
+
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define ENIC_ASSERT(cond) \
+	do { \
+		if (unlikely(!(cond))) { \
+			rte_panic("line %d\tassert \"" #cond "\"" \
+					"failed\n", __LINE__); \
+		} \
+	} while (0)
 #else
-#define ASSERT(x)
+#define ENIC_ASSERT(cond) do {} while (0)
 #endif
 
 extern void enic_fdir_stats_get(struct enic *enic,
@@ -209,5 +277,7 @@ extern int enic_clsf_init(struct enic *enic);
 extern void enic_clsf_destroy(struct enic *enic);
 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
-
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts);
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
 #endif /* _ENIC_H_ */
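enic_ring_add and enic_ring_sub perform modular ring arithmetic with a compare-and-subtract instead of `%`, which is valid as long as the offset being added is itself below n_descriptors. A worked example with an assumed 64-entry ring:

    /* n = 64:
     * enic_ring_add(64, 60, 8)  -> 60 + 8 = 68, 68 >= 64, so 68 - 64 = 4
     * enic_ring_sub(64, 60, 4)  -> 4 - 60 = -56 < 0, so 64 + (-56) = 8
     * i.e. posting 8 descriptors at index 60 wraps to 4, and the distance
     * from tail 60 to head 4 is 8 in-flight descriptors. */
    uint32_t next_head = enic_ring_add(64, 60, 8);   /* == 4 */
    uint32_t in_flight = enic_ring_sub(64, 60, 4);   /* == 8 */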
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index edb56e1d..3365176a 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -148,9 +148,13 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 		enic->fdir.nodes[pos] = NULL;
 		if (unlikely(key->rq_index == queue)) {
 			/* Nothing to be done */
+			enic->fdir.stats.f_add++;
 			pos = rte_hash_add_key(enic->fdir.hash, params);
+			if (pos < 0) {
+				dev_err(enic, "Add hash key failed\n");
+				return pos;
+			}
 			enic->fdir.nodes[pos] = key;
-			enic->fdir.stats.f_add++;
 			dev_warning(enic,
 				"FDIR rule is already present\n");
 			return 0;
@@ -213,6 +217,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 	}
 
 	pos = rte_hash_add_key(enic->fdir.hash, params);
+	if (pos < 0) {
+		dev_err(enic, "Add hash key failed\n");
+		return pos;
+	}
+
 	enic->fdir.nodes[pos] = key;
 	return 0;
 }
@@ -239,15 +248,16 @@ void enic_clsf_destroy(struct enic *enic)
 int enic_clsf_init(struct enic *enic)
 {
+	char clsf_name[RTE_HASH_NAMESIZE];
 	struct rte_hash_parameters hash_params = {
-		.name = "enicpmd_clsf_hash",
+		.name = clsf_name,
 		.entries = ENICPMD_CLSF_HASH_ENTRIES,
 		.key_len = sizeof(struct rte_eth_fdir_filter),
 		.hash_func = DEFAULT_HASH_FUNC,
 		.hash_func_init_val = 0,
 		.socket_id = SOCKET_ID_ANY,
 	};
-
+	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
 	enic->fdir.hash = rte_hash_create(&hash_params);
 	memset(&enic->fdir.stats, 0,
 		sizeof(enic->fdir.stats));
 	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
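Two independent fixes here: the hash name now embeds the device's BDF string so two enic ports can each create their own flow-director hash, and both insertion paths check the return value of rte_hash_add_key, which yields the slot index on success and a negative errno on failure; previously a failure would have indexed nodes[] with a negative value. The checked pattern:

    /* rte_hash_add_key: >= 0 is the slot index, < 0 is -EINVAL/-ENOSPC. */
    pos = rte_hash_add_key(enic->fdir.hash, params);
    if (pos < 0) {
        dev_err(enic, "Add hash key failed\n");
        return pos;                 /* propagate the errno */
    }
    enic->fdir.nodes[pos] = key;    /* only index nodes[] on success */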
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 6bea9405..a7ce064f 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -269,14 +269,18 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
-	if (queue_idx >= ENIC_RQ_MAX) {
+	/* With Rx scatter support, two RQs are now used on VIC per RQ used
+	 * by the application.
+	 */
+	if (queue_idx * 2 >= ENIC_RQ_MAX) {
 		dev_err(enic,
-			"Max number of RX queues exceeded. Max is %d\n",
+			"Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
 			ENIC_RQ_MAX);
 		return -EINVAL;
 	}
 
-	eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
+	eth_dev->data->rx_queues[queue_idx] =
+		(void *)&enic->rq[enic_sop_rq(queue_idx)];
 
 	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
 	if (ret) {
@@ -435,7 +439,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->max_rx_queues = enic->rq_count;
 	device_info->max_tx_queues = enic->wq_count;
 	device_info->min_rx_bufsize = ENIC_MIN_MTU;
-	device_info->max_rx_pktlen = enic->config.mtu;
+	device_info->max_rx_pktlen = enic->rte_dev->data->mtu
+				   + ETHER_HDR_LEN + 4;
 	device_info->max_mac_addrs = 1;
 	device_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -455,8 +460,12 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
 	static const uint32_t ptypes[] = {
-		RTE_PTYPE_L3_IPV4,
-		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
 		RTE_PTYPE_UNKNOWN
 	};
 
@@ -519,69 +528,12 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
 	enic_del_mac_address(enic);
 }
 
-
-static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-	uint16_t nb_pkts)
+static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
-	uint16_t index;
-	unsigned int frags;
-	unsigned int pkt_len;
-	unsigned int seg_len;
-	unsigned int inc_len;
-	unsigned int nb_segs;
-	struct rte_mbuf *tx_pkt, *next_tx_pkt;
-	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
-	struct enic *enic = vnic_dev_priv(wq->vdev);
-	unsigned short vlan_id;
-	unsigned short ol_flags;
-	uint8_t last_seg, eop;
-	unsigned int host_tx_descs = 0;
-
-	for (index = 0; index < nb_pkts; index++) {
-		tx_pkt = *tx_pkts++;
-		inc_len = 0;
-		nb_segs = tx_pkt->nb_segs;
-		if (nb_segs > vnic_wq_desc_avail(wq)) {
-			if (index > 0)
-				enic_post_wq_index(wq);
-
-			/* wq cleanup and try again */
-			if (!enic_cleanup_wq(enic, wq) ||
-				(nb_segs > vnic_wq_desc_avail(wq))) {
-				return index;
-			}
-		}
-
-		pkt_len = tx_pkt->pkt_len;
-		vlan_id = tx_pkt->vlan_tci;
-		ol_flags = tx_pkt->ol_flags;
-		for (frags = 0; inc_len < pkt_len; frags++) {
-			if (!tx_pkt)
-				break;
-			next_tx_pkt = tx_pkt->next;
-			seg_len = tx_pkt->data_len;
-			inc_len += seg_len;
-
-			host_tx_descs++;
-			last_seg = 0;
-			eop = 0;
-			if ((pkt_len == inc_len) || !next_tx_pkt) {
-				eop = 1;
-				/* post if last packet in batch or > thresh */
-				if ((index == (nb_pkts - 1)) ||
-				    (host_tx_descs > ENIC_TX_POST_THRESH)) {
-					last_seg = 1;
-					host_tx_descs = 0;
-				}
-			}
-			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
-				      !frags, eop, last_seg, ol_flags, vlan_id);
-			tx_pkt = next_tx_pkt;
-		}
-	}
+	struct enic *enic = pmd_priv(eth_dev);
 
-	enic_cleanup_wq(enic, wq);
-	return index;
+	ENICPMD_FUNC_TRACE();
+	return enic_set_mtu(enic, mtu);
 }
 
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
@@ -601,7 +553,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.queue_stats_mapping_set = NULL,
 	.dev_infos_get        = enicpmd_dev_info_get,
 	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
-	.mtu_set              = NULL,
+	.mtu_set              = enicpmd_mtu_set,
 	.vlan_filter_set      = enicpmd_vlan_filter_set,
 	.vlan_tpid_set        = NULL,
 	.vlan_offload_set     = enicpmd_vlan_offload_set,
@@ -642,7 +594,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	enic->rte_dev = eth_dev;
 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &enic_recv_pkts;
-	eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
+	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
 
 	pdev = eth_dev->pci_dev;
 	rte_eth_copy_pci_info(eth_dev, pdev);
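max_rx_pktlen is now derived from the currently configured MTU rather than the static adapter-policy MTU. The 18 extra bytes are the 14-byte Ethernet header plus the 4-byte FCS; a possibly inserted VLAN tag is deliberately not counted (see the ENIC_DEFAULT_MAX_PKT_SIZE comment in enic_res.h below). The arithmetic:

    /* mtu 1500 -> max_rx_pktlen 1518; mtu 9000 -> 9018.
     * ETHER_HDR_LEN (14) + 4 (FCS); a VLAN tag would add 4 more on the wire. */
    device_info->max_rx_pktlen = eth_dev->data->mtu + ETHER_HDR_LEN + 4;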
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index e3da51db..dc831b48 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -58,18 +58,6 @@
 #include "vnic_cq.h"
 #include "vnic_intr.h"
 #include "vnic_nic.h"
-#include "enic_vnic_wq.h"
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static inline int enic_is_sriov_vf(struct enic *enic)
@@ -92,7 +80,7 @@ static int is_eth_addr_valid(uint8_t *addr)
 }
 
 static void
-enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
 {
 	uint16_t i;
 
@@ -101,7 +89,7 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
 		return;
 	}
 
-	for (i = 0; i < enic->config.rq_desc_count; i++) {
+	for (i = 0; i < rq->ring.desc_count; i++) {
 		if (rq->mbuf_ring[i]) {
 			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
 			rq->mbuf_ring[i] = NULL;
@@ -109,38 +97,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
 	}
 }
 
-
 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
 {
 	vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
 }
 
-static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
 {
-	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
+	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
 
 	rte_mempool_put(mbuf->pool, mbuf);
-	buf->os_buf = NULL;
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
-	__rte_unused struct cq_desc *cq_desc,
-	struct vnic_wq_buf *buf,
-	__rte_unused void *opaque)
-{
-	enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-	__rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-	struct enic *enic = vnic_dev_priv(vdev);
-
-	vnic_wq_service(&enic->wq[q_number], cq_desc,
-		completed_index, enic_wq_free_buf,
-		opaque);
-
-	return 0;
+	buf->mb = NULL;
 }
 
 static void enic_log_q_error(struct enic *enic)
@@ -155,7 +122,7 @@ static void enic_log_q_error(struct enic *enic)
 			error_status);
 	}
 
-	for (i = 0; i < enic->rq_count; i++) {
+	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
 		error_status = vnic_rq_error_status(&enic->rq[i]);
 		if (error_status)
 			dev_err(enic, "RQ[%d] error_status %d\n", i,
@@ -163,93 +130,62 @@ static void enic_log_q_error(struct enic *enic)
 	}
 }
 
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
+static void enic_clear_soft_stats(struct enic *enic)
 {
-	unsigned int cq = enic_cq_wq(enic, wq->index);
-
-	/* Return the work done */
-	return vnic_cq_service(&enic->cq[cq],
-		-1 /*wq_work_to_do*/, enic_wq_service, NULL);
+	struct enic_soft_stats *soft_stats = &enic->soft_stats;
+	rte_atomic64_clear(&soft_stats->rx_nombuf);
+	rte_atomic64_clear(&soft_stats->rx_packet_errors);
 }
 
-void enic_post_wq_index(struct vnic_wq *wq)
+static void enic_init_soft_stats(struct enic *enic)
 {
-	enic_vnic_post_wq_index(wq);
-}
-
-void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
-	struct rte_mbuf *tx_pkt, unsigned short len,
-	uint8_t sop, uint8_t eop, uint8_t cq_entry,
-	uint16_t ol_flags, uint16_t vlan_tag)
-{
-	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-	uint16_t mss = 0;
-	uint8_t vlan_tag_insert = 0;
-	uint64_t bus_addr = (dma_addr_t)
-	    (tx_pkt->buf_physaddr + tx_pkt->data_off);
-
-	if (sop) {
-		if (ol_flags & PKT_TX_VLAN_PKT)
-			vlan_tag_insert = 1;
-
-		if (enic->hw_ip_checksum) {
-			if (ol_flags & PKT_TX_IP_CKSUM)
-				mss |= ENIC_CALC_IP_CKSUM;
-
-			if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
-				mss |= ENIC_CALC_TCP_UDP_CKSUM;
-		}
-	}
-
-	wq_enet_desc_enc(desc,
-		bus_addr,
-		len,
-		mss,
-		0 /* header_length */,
-		0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
-		eop,
-		cq_entry,
-		0 /* fcoe_encap */,
-		vlan_tag_insert,
-		vlan_tag,
-		0 /* loopback */);
-
-	enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
-		sop,
-		1 /*desc_skip_cnt*/,
-		cq_entry,
-		0 /*compressed send*/,
-		0 /*wrid*/);
+	struct enic_soft_stats *soft_stats = &enic->soft_stats;
+	rte_atomic64_init(&soft_stats->rx_nombuf);
+	rte_atomic64_init(&soft_stats->rx_packet_errors);
+	enic_clear_soft_stats(enic);
 }
 
 void enic_dev_stats_clear(struct enic *enic)
 {
 	if (vnic_dev_stats_clear(enic->vdev))
 		dev_err(enic, "Error in clearing stats\n");
+	enic_clear_soft_stats(enic);
 }
 void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
 {
 	struct vnic_stats *stats;
+	struct enic_soft_stats *soft_stats = &enic->soft_stats;
+	int64_t rx_truncated;
+	uint64_t rx_packet_errors;
 
 	if (vnic_dev_stats_dump(enic->vdev, &stats)) {
 		dev_err(enic, "Error in getting stats\n");
 		return;
 	}
 
-	r_stats->ipackets = stats->rx.rx_frames_ok;
+	/* The number of truncated packets can only be calculated by
+	 * subtracting a hardware counter from error packets received by
+	 * the driver. Note: this causes transient inaccuracies in the
+	 * ipackets count. Also, the length of truncated packets are
+	 * counted in ibytes even though truncated packets are dropped
+	 * which can make ibytes be slightly higher than it should be.
+	 */
+	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
+	rx_truncated = rx_packet_errors - stats->rx.rx_errors;
+
+	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
 	r_stats->opackets = stats->tx.tx_frames_ok;
 
 	r_stats->ibytes = stats->rx.rx_bytes_ok;
 	r_stats->obytes = stats->tx.tx_bytes_ok;
 
-	r_stats->ierrors = stats->rx.rx_errors;
+	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
 	r_stats->oerrors = stats->tx.tx_errors;
 
-	r_stats->imissed = stats->rx.rx_drop;
+	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
 
-	r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
-	r_stats->rx_nombuf = stats->rx.rx_no_bufs;
+	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
 }
 
 void enic_del_mac_address(struct enic *enic)
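A worked example of the truncation accounting above, with assumed counter values: hardware counts a truncated packet as received OK, while the driver counts it as a packet error, so the difference between the two error counters is the truncation count.

    /* Suppose hw reports rx_frames_ok = 1000 and rx_errors = 2, and the
     * driver's soft rx_packet_errors counter reads 5.  Then 5 - 2 = 3
     * packets were truncated (OK per hw, dropped by the driver):
     *   ipackets = 1000 - 3 = 997
     * and those 3 are reported in imissed rather than ipackets. */
    rx_truncated = rx_packet_errors - stats->rx.rx_errors;      /* 3 */
    r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;  /* 997 */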
@@ -298,12 +234,35 @@ void enic_init_vnic_resources(struct enic *enic)
 	unsigned int error_interrupt_enable = 1;
 	unsigned int error_interrupt_offset = 0;
 	unsigned int index = 0;
+	unsigned int cq_idx;
+	struct vnic_rq *data_rq;
 
 	for (index = 0; index < enic->rq_count; index++) {
-		vnic_rq_init(&enic->rq[index],
-			enic_cq_rq(enic, index),
+		cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+
+		vnic_rq_init(&enic->rq[enic_sop_rq(index)],
+			cq_idx,
 			error_interrupt_enable,
 			error_interrupt_offset);
+
+		data_rq = &enic->rq[enic_data_rq(index)];
+		if (data_rq->in_use)
+			vnic_rq_init(data_rq,
+				     cq_idx,
+				     error_interrupt_enable,
+				     error_interrupt_offset);
+
+		vnic_cq_init(&enic->cq[cq_idx],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			0 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			0 /* interrupt offset */,
+			0 /* cq_message_addr */);
 	}
 
 	for (index = 0; index < enic->wq_count; index++) {
@@ -311,22 +270,19 @@ void enic_init_vnic_resources(struct enic *enic)
 			enic_cq_wq(enic, index),
 			error_interrupt_enable,
 			error_interrupt_offset);
-	}
 
-	vnic_dev_stats_clear(enic->vdev);
-
-	for (index = 0; index < enic->cq_count; index++) {
-		vnic_cq_init(&enic->cq[index],
+		cq_idx = enic_cq_wq(enic, index);
+		vnic_cq_init(&enic->cq[cq_idx],
 			0 /* flow_control_enable */,
 			1 /* color_enable */,
 			0 /* cq_head */,
 			0 /* cq_tail */,
 			1 /* cq_tail_color */,
 			0 /* interrupt_enable */,
-			1 /* cq_entry_enable */,
-			0 /* cq_message_enable */,
+			0 /* cq_entry_enable */,
+			1 /* cq_message_enable */,
 			0 /* interrupt offset */,
-			0 /* cq_message_addr */);
+			(u64)enic->wq[index].cqmsg_rz->phys_addr);
 	}
 
 	vnic_intr_init(&enic->intr,
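Note the Tx-side completion model flips here: Rx CQs keep per-descriptor entries (cq_entry_enable), but WQ CQs switch to "message" mode, where the NIC DMA-writes the latest completed index into a small host buffer (the per-WQ cqmsg memzone allocated in enic_alloc_wq below). The new Tx cleanup in enic_rxtx.c presumably polls that word instead of walking CQ descriptors; an illustrative sketch only, assuming the completed index sits in the low 16 bits:

    /* Illustrative, not the PMD's literal cleanup code: */
    volatile uint32_t *cqmsg = (volatile uint32_t *)wq->cqmsg_rz->addr;
    uint16_t completed_index = (uint16_t)(*cqmsg & 0xffff);
    /* free Tx mbufs from wq->last_completed_index up to completed_index */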
@@ -344,30 +300,35 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 	unsigned i;
 	dma_addr_t dma_addr;
 
+	if (!rq->in_use)
+		return 0;
+
 	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
 		  rq->ring.desc_count);
 
 	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
-		mb = rte_rxmbuf_alloc(rq->mp);
+		mb = rte_mbuf_raw_alloc(rq->mp);
 		if (mb == NULL) {
 			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
 			(unsigned)rq->index);
 			return -ENOMEM;
 		}
 
-		dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-
-		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-				 mb->buf_len);
+		dma_addr = (dma_addr_t)(mb->buf_physaddr
+			   + RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd, dma_addr,
+				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				: RQ_ENET_TYPE_NOT_SOP),
+				mb->buf_len - RTE_PKTMBUF_HEADROOM);
 		rq->mbuf_ring[i] = mb;
 	}
 
 	/* make sure all prior writes are complete before doing the PIO write */
 	rte_rmb();
 
-	/* Post all but the last 2 cache lines' worth of descriptors */
-	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-			/ sizeof(struct rq_enet_desc));
+	/* Post all but the last buffer to VIC. */
+	rq->posted_index = rq->ring.desc_count - 1;
+
 	rq->rx_nb_hold = 0;
 
 	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
@@ -380,12 +341,14 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 }
 
 static void *
-enic_alloc_consistent(__rte_unused void *priv, size_t size,
+enic_alloc_consistent(void *priv, size_t size,
 	dma_addr_t *dma_handle, u8 *name)
 {
 	void *vaddr;
 	const struct rte_memzone *rz;
 	*dma_handle = 0;
+	struct enic *enic = (struct enic *)priv;
+	struct enic_memzone_entry *mze;
 
 	rz = rte_memzone_reserve_aligned((const char *)name,
 					 size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
@@ -398,16 +361,49 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
 	vaddr = rz->addr;
 	*dma_handle = (dma_addr_t)rz->phys_addr;
 
+	mze = rte_malloc("enic memzone entry",
+			 sizeof(struct enic_memzone_entry), 0);
+
+	if (!mze) {
+		pr_err("%s : Failed to allocate memory for memzone list\n",
+		       __func__);
+		rte_memzone_free(rz);
+	}
+
+	mze->rz = rz;
+
+	rte_spinlock_lock(&enic->memzone_list_lock);
+	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
+	rte_spinlock_unlock(&enic->memzone_list_lock);
+
 	return vaddr;
 }
 
 static void
-enic_free_consistent(__rte_unused struct rte_pci_device *hwdev,
-	__rte_unused size_t size,
-	__rte_unused void *vaddr,
-	__rte_unused dma_addr_t dma_handle)
+enic_free_consistent(void *priv,
+		     __rte_unused size_t size,
+		     void *vaddr,
+		     dma_addr_t dma_handle)
 {
-	/* Nothing to be done */
+	struct enic_memzone_entry *mze;
+	struct enic *enic = (struct enic *)priv;
+
+	rte_spinlock_lock(&enic->memzone_list_lock);
+	LIST_FOREACH(mze, &enic->memzone_list, entries) {
+		if (mze->rz->addr == vaddr &&
+		    mze->rz->phys_addr == dma_handle)
+			break;
+	}
+	if (mze == NULL) {
+		rte_spinlock_unlock(&enic->memzone_list_lock);
+		dev_warning(enic,
+			    "Tried to free memory, but couldn't find it in the memzone list\n");
+		return;
+	}
+	LIST_REMOVE(mze, entries);
+	rte_spinlock_unlock(&enic->memzone_list_lock);
+	rte_memzone_free(mze->rz);
+	rte_free(mze);
 }
 
 static void
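The memzone list exists because the common vNIC code hands back only (vaddr, dma_handle) on free, and DPDK has no memzone lookup by address (rte_memzone_lookup is by name), so the PMD must remember every allocation itself. The track-on-alloc pattern, condensed from the code above:

    /* Reserve DMA-able memory, then record it under the spinlock so
     * enic_free_consistent can find it later by (addr, phys_addr). */
    rz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
    mze = rte_malloc("enic memzone entry", sizeof(*mze), 0);
    mze->rz = rz;
    rte_spinlock_lock(&enic->memzone_list_lock);
    LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
    rte_spinlock_unlock(&enic->memzone_list_lock);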
@@ -436,17 +432,28 @@ int enic_enable(struct enic *enic)
 		"Flow director feature will not work\n");
 
 	for (index = 0; index < enic->rq_count; index++) {
-		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+		err = enic_alloc_rx_queue_mbufs(enic,
+			&enic->rq[enic_sop_rq(index)]);
+		if (err) {
+			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+			return err;
+		}
+		err = enic_alloc_rx_queue_mbufs(enic,
+			&enic->rq[enic_data_rq(index)]);
 		if (err) {
-			dev_err(enic, "Failed to alloc RX queue mbufs\n");
+			/* release the allocated mbufs for the sop rq*/
+			enic_rxmbuf_queue_release(enic,
+				&enic->rq[enic_sop_rq(index)]);
+
+			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
 			return err;
 		}
 	}
 
 	for (index = 0; index < enic->wq_count; index++)
-		vnic_wq_enable(&enic->wq[index]);
+		enic_start_wq(enic, index);
 	for (index = 0; index < enic->rq_count; index++)
-		vnic_rq_enable(&enic->rq[index]);
+		enic_start_rq(enic, index);
 
 	vnic_dev_enable_wait(enic->vdev);
 
@@ -466,7 +473,7 @@ int enic_alloc_intr_resources(struct enic *enic)
 
 	dev_info(enic, "vNIC resources used: "\
 		"wq %d rq %d cq %d intr %d\n",
-		enic->wq_count, enic->rq_count,
+		enic->wq_count, enic_vnic_rq_count(enic),
 		enic->cq_count, enic->intr_count);
 
 	err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
@@ -478,14 +485,32 @@ int enic_alloc_intr_resources(struct enic *enic)
 
 void enic_free_rq(void *rxq)
 {
-	struct vnic_rq *rq = (struct vnic_rq *)rxq;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct vnic_rq *rq_sop, *rq_data;
+	struct enic *enic;
+
+	if (rxq == NULL)
+		return;
+
+	rq_sop = (struct vnic_rq *)rxq;
+	enic = vnic_dev_priv(rq_sop->vdev);
+	rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+	enic_rxmbuf_queue_release(enic, rq_sop);
+	if (rq_data->in_use)
+		enic_rxmbuf_queue_release(enic, rq_data);
+
+	rte_free(rq_sop->mbuf_ring);
+	if (rq_data->in_use)
+		rte_free(rq_data->mbuf_ring);
+
+	rq_sop->mbuf_ring = NULL;
+	rq_data->mbuf_ring = NULL;
 
-	enic_rxmbuf_queue_release(enic, rq);
-	rte_free(rq->mbuf_ring);
-	rq->mbuf_ring = NULL;
-	vnic_rq_free(rq);
-	vnic_cq_free(&enic->cq[rq->index]);
+	vnic_rq_free(rq_sop);
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+
+	vnic_cq_free(&enic->cq[rq_sop->index]);
 }
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -500,12 +525,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
-	vnic_rq_enable(&enic->rq[queue_idx]);
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+	if (rq_data->in_use)
+		vnic_rq_enable(rq_data);
+	rte_mb();
+	vnic_rq_enable(rq_sop);
+
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
-	return vnic_rq_disable(&enic->rq[queue_idx]);
+	int ret1 = 0, ret2 = 0;
+
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+	ret2 = vnic_rq_disable(rq_sop);
+	rte_mb();
+	if (rq_data->in_use)
+		ret1 = vnic_rq_disable(rq_data);
+
+	if (ret2)
+		return ret2;
+	else
+		return ret1;
 }
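Ordering appears deliberate in the start/stop pair: on enable the data RQ comes up before the SOP RQ (with a full barrier between), so the SOP queue never hands hardware a packet that would spill into a not-yet-enabled data queue; on disable the SOP RQ is quiesced first, and its return code takes precedence. The enable half, condensed:

    /* Data queue first, barrier, then SOP; disable is the mirror image. */
    if (rq_data->in_use)
        vnic_rq_enable(rq_data);
    rte_mb();
    vnic_rq_enable(rq_sop);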
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	unsigned int socket_id, struct rte_mempool *mp,
 	uint16_t nb_desc)
 {
 	int rc;
-	struct vnic_rq *rq = &enic->rq[queue_idx];
+	uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
+	uint16_t data_queue_idx = enic_data_rq(queue_idx);
+	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+	unsigned int mbuf_size, mbufs_per_pkt;
+	unsigned int nb_sop_desc, nb_data_desc;
+	uint16_t min_sop, max_sop, min_data, max_data;
+
+	rq_sop->is_sop = 1;
+	rq_sop->data_queue_idx = data_queue_idx;
+	rq_data->is_sop = 0;
+	rq_data->data_queue_idx = 0;
+	rq_sop->socket_id = socket_id;
+	rq_sop->mp = mp;
+	rq_data->socket_id = socket_id;
+	rq_data->mp = mp;
+	rq_sop->in_use = 1;
+
+	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+			       RTE_PKTMBUF_HEADROOM);
+
+	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+		dev_info(enic, "Scatter rx mode enabled\n");
+		/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
+		mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+				 (mbuf_size - 1)) / mbuf_size;
+	} else {
+		dev_info(enic, "Scatter rx mode disabled\n");
+		mbufs_per_pkt = 1;
+	}
 
-	rq->socket_id = socket_id;
-	rq->mp = mp;
+	if (mbufs_per_pkt > 1) {
+		dev_info(enic, "Scatter rx mode in use\n");
+		rq_data->in_use = 1;
+	} else {
+		dev_info(enic, "Scatter rx mode not being used\n");
+		rq_data->in_use = 0;
+	}
 
-	if (nb_desc) {
-		if (nb_desc > enic->config.rq_desc_count) {
-			dev_warning(enic,
-				"RQ %d - number of rx desc in cmd line (%d)"\
-				"is greater than that in the UCSM/CIMC adapter"\
-				"policy. Applying the value in the adapter "\
-				"policy (%d).\n",
-				queue_idx, nb_desc, enic->config.rq_desc_count);
-			nb_desc = enic->config.rq_desc_count;
-		}
-		dev_info(enic, "RX Queues - effective number of descs:%d\n",
-			 nb_desc);
+	/* number of descriptors have to be a multiple of 32 */
+	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+
+	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
+	if (mbufs_per_pkt > 1) {
+		min_sop = 64;
+		max_sop = ((enic->config.rq_desc_count /
+			    (mbufs_per_pkt - 1)) & ~0x1F);
+		min_data = min_sop * (mbufs_per_pkt - 1);
+		max_data = enic->config.rq_desc_count;
+	} else {
+		min_sop = 64;
+		max_sop = enic->config.rq_desc_count;
+		min_data = 0;
+		max_data = 0;
 	}
 
-	/* Allocate queue resources */
-	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-		nb_desc, sizeof(struct rq_enet_desc));
+	if (nb_desc < (min_sop + min_data)) {
+		dev_warning(enic,
+			    "Number of rx descs too low, adjusting to minimum\n");
+		nb_sop_desc = min_sop;
+		nb_data_desc = min_data;
+	} else if (nb_desc > (max_sop + max_data)) {
+		dev_warning(enic,
+			    "Number of rx_descs too high, adjusting to maximum\n");
+		nb_sop_desc = max_sop;
+		nb_data_desc = max_data;
+	}
+	if (mbufs_per_pkt > 1) {
+		dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+			 enic->config.mtu, mbuf_size, min_sop + min_data,
+			 max_sop + max_data);
+	}
+	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
+	/* Allocate sop queue resources */
+	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+		nb_sop_desc, sizeof(struct rq_enet_desc));
 	if (rc) {
-		dev_err(enic, "error in allocation of rq\n");
+		dev_err(enic, "error in allocation of sop rq\n");
 		goto err_exit;
 	}
-
+	nb_sop_desc = rq_sop->ring.desc_count;
+
+	if (rq_data->in_use) {
+		/* Allocate data queue resources */
+		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+				   nb_data_desc,
+				   sizeof(struct rq_enet_desc));
+		if (rc) {
+			dev_err(enic, "error in allocation of data rq\n");
+			goto err_free_rq_sop;
+		}
+		nb_data_desc = rq_data->ring.desc_count;
+	}
 	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-		socket_id, nb_desc,
-		sizeof(struct cq_enet_rq_desc));
+		socket_id, nb_sop_desc + nb_data_desc,
+		sizeof(struct cq_enet_rq_desc));
 	if (rc) {
 		dev_err(enic, "error in allocation of cq for rq\n");
-		goto err_free_rq_exit;
+		goto err_free_rq_data;
 	}
 
-	/* Allocate the mbuf ring */
-	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-			sizeof(struct rte_mbuf *) * nb_desc,
-			RTE_CACHE_LINE_SIZE, rq->socket_id);
+	/* Allocate the mbuf rings */
+	rq_sop->mbuf_ring = (struct rte_mbuf **)
+		rte_zmalloc_socket("rq->mbuf_ring",
+			sizeof(struct rte_mbuf *) * nb_sop_desc,
+			RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+	if (rq_sop->mbuf_ring == NULL)
+		goto err_free_cq;
+
+	if (rq_data->in_use) {
+		rq_data->mbuf_ring = (struct rte_mbuf **)
+			rte_zmalloc_socket("rq->mbuf_ring",
+				sizeof(struct rte_mbuf *) * nb_data_desc,
+				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+		if (rq_data->mbuf_ring == NULL)
+			goto err_free_sop_mbuf;
+	}
 
-	if (rq->mbuf_ring != NULL)
-		return 0;
+	return 0;
 
+err_free_sop_mbuf:
+	rte_free(rq_sop->mbuf_ring);
+err_free_cq:
 	/* cleanup on error */
 	vnic_cq_free(&enic->cq[queue_idx]);
-err_free_rq_exit:
-	vnic_rq_free(rq);
+err_free_rq_data:
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+err_free_rq_sop:
+	vnic_rq_free(rq_sop);
 err_exit:
 	return -ENOMEM;
 }
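A worked example of the descriptor split above, with assumed values of MTU 9000 and a 2048-byte mbuf data room after headroom:

    /* mbufs_per_pkt = ceil((9000 + 14 + 4) / 2048) = ceil(9018 / 2048) = 5
     * With nb_desc = 512 requested by the application:
     *   nb_sop_desc  = (512 / 5) & ~0x1F  = 102 & ~31 = 96
     *   nb_data_desc = (512 - 96) & ~0x1F = 416
     * i.e. one SOP descriptor is budgeted per packet and the remaining
     * descriptors hold continuation segments; both counts stay multiples
     * of 32 as the VIC requires. */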
 void enic_free_wq(void *txq)
 {
-	struct vnic_wq *wq = (struct vnic_wq *)txq;
-	struct enic *enic = vnic_dev_priv(wq->vdev);
+	struct vnic_wq *wq;
+	struct enic *enic;
+
+	if (txq == NULL)
+		return;
 
+	wq = (struct vnic_wq *)txq;
+	enic = vnic_dev_priv(wq->vdev);
+	rte_memzone_free(wq->cqmsg_rz);
 	vnic_wq_free(wq);
 	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
 }
@@ -579,6 +718,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
 	int err;
 	struct vnic_wq *wq = &enic->wq[queue_idx];
 	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+	char name[NAME_MAX];
+	static int instance;
 
 	wq->socket_id = socket_id;
 	if (nb_desc) {
@@ -614,6 +755,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
 		dev_err(enic, "error in allocation of cq for wq\n");
 	}
 
+	/* setup up CQ message */
+	snprintf((char *)name, sizeof(name),
+		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
+		instance++);
+
+	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
+						   sizeof(uint32_t),
+						   SOCKET_ID_ANY, 0,
+						   ENIC_ALIGN);
+	if (!wq->cqmsg_rz)
+		return -ENOMEM;
+
 	return err;
 }
 
@@ -637,10 +790,12 @@ int enic_disable(struct enic *enic)
 		if (err)
 			return err;
 	}
-	for (i = 0; i < enic->rq_count; i++) {
-		err = vnic_rq_disable(&enic->rq[i]);
-		if (err)
-			return err;
+	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+		if (enic->rq[i].in_use) {
+			err = vnic_rq_disable(&enic->rq[i]);
+			if (err)
+				return err;
+		}
 	}
 
 	vnic_dev_set_reset_flag(enic->vdev, 1);
@@ -649,8 +804,9 @@ int enic_disable(struct enic *enic)
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
 
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	for (i = 0; i < enic_vnic_rq_count(enic); i++)
+		if (enic->rq[i].in_use)
+			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
 	for (i = 0; i < enic->cq_count; i++)
 		vnic_cq_clean(&enic->cq[i]);
 	vnic_intr_clean(&enic->intr);
@@ -723,7 +879,7 @@ static int enic_set_rsskey(struct enic *enic)
 		rss_key_buf_pa,
 		sizeof(union vnic_rss_key));
 
-	enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+	enic_free_consistent(enic, sizeof(union vnic_rss_key),
 		rss_key_buf_va, rss_key_buf_pa);
 
 	return err;
@@ -744,13 +900,14 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 		return -ENOMEM;
 
 	for (i = 0; i < (1 << rss_hash_bits); i++)
-		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+		(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
+			enic_sop_rq(i % enic->rq_count);
 
 	err = enic_set_rss_cpu(enic,
 		rss_cpu_buf_pa,
 		sizeof(union vnic_rss_cpu));
 
-	enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
 		rss_cpu_buf_va, rss_cpu_buf_pa);
 
 	return err;
@@ -806,6 +963,8 @@ int enic_setup_finish(struct enic *enic)
 {
 	int ret;
 
+	enic_init_soft_stats(enic);
+
 	ret = enic_set_rss_nic_cfg(enic);
 	if (ret) {
 		dev_err(enic, "Failed to config nic, aborting.\n");
@@ -851,21 +1010,79 @@ static void enic_dev_deinit(struct enic *enic)
 int enic_set_vnic_res(struct enic *enic)
 {
 	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	int rc = 0;
 
-	if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
-		(enic->wq_count < eth_dev->data->nb_tx_queues)) {
-		dev_err(dev, "Not enough resources configured, aborting\n");
-		return -1;
+	/* With Rx scatter support, two RQs are now used per RQ used by
+	 * the application.
+	 */
+	if (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) {
+		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
+			eth_dev->data->nb_rx_queues,
+			eth_dev->data->nb_rx_queues * 2, enic->rq_count);
+		rc = -EINVAL;
+	}
+	if (enic->wq_count < eth_dev->data->nb_tx_queues) {
+		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
+			eth_dev->data->nb_tx_queues, enic->wq_count);
+		rc = -EINVAL;
 	}
 
-	enic->rq_count = eth_dev->data->nb_rx_queues;
-	enic->wq_count = eth_dev->data->nb_tx_queues;
 	if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
-		dev_err(dev, "Not enough resources configured, aborting\n");
-		return -1;
+		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
+			enic->rq_count + enic->wq_count, enic->cq_count);
+		rc = -EINVAL;
+	}
+
+	if (rc == 0) {
+		enic->rq_count = eth_dev->data->nb_rx_queues;
+		enic->wq_count = eth_dev->data->nb_tx_queues;
+		enic->cq_count = enic->rq_count + enic->wq_count;
 	}
 
-	enic->cq_count = enic->rq_count + enic->wq_count;
+	return rc;
+}
+
+/* The Cisco NIC can send and receive packets up to a max packet size
+ * determined by the NIC type and firmware. There is also an MTU
+ * configured into the NIC via the CIMC/UCSM management interface
+ * which can be overridden by this function (up to the max packet size).
+ * Depending on the network setup, doing so may cause packet drops
+ * and unexpected behavior.
+ */
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
+{
+	uint16_t old_mtu;	/* previous setting */
+	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+	old_mtu = eth_dev->data->mtu;
+	config_mtu = enic->config.mtu;
+
+	/* only works with Rx scatter disabled */
+	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
+		return -ENOTSUP;
+
+	if (new_mtu > enic->max_mtu) {
+		dev_err(enic,
+			"MTU not updated: requested (%u) greater than max (%u)\n",
+			new_mtu, enic->max_mtu);
+		return -EINVAL;
+	}
+	if (new_mtu < ENIC_MIN_MTU) {
+		dev_info(enic,
+			"MTU not updated: requested (%u) less than min (%u)\n",
+			new_mtu, ENIC_MIN_MTU);
+		return -EINVAL;
+	}
+	if (new_mtu > config_mtu)
+		dev_warning(enic,
+			"MTU (%u) is greater than value configured in NIC (%u)\n",
+			new_mtu, config_mtu);
+
+	/* update the mtu */
+	eth_dev->data->mtu = new_mtu;
+
+	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
 	return 0;
 }
 
@@ -920,6 +1137,9 @@ int enic_probe(struct enic *enic)
 		goto err_out;
 	}
 
+	LIST_INIT(&enic->memzone_list);
+	rte_spinlock_init(&enic->memzone_list_lock);
+
 	vnic_register_cbacks(enic->vdev,
 		enic_alloc_consistent,
 		enic_free_consistent);
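From an application, enic_set_mtu is reached through the standard ethdev call, and because it only updates the software MTU (no queue restart), it refuses to run while scatter Rx is enabled. A hedged usage sketch, with port_id standing in for whatever port the application uses:

    /* Standard DPDK API; dispatches to enicpmd_mtu_set -> enic_set_mtu. */
    int ret = rte_eth_dev_set_mtu(port_id, 9000);
    if (ret == -ENOTSUP) {
        /* scatter Rx is on: this PMD adjusts MTU only with scatter off */
    } else if (ret == -EINVAL) {
        /* requested value outside [ENIC_MIN_MTU, enic->max_mtu] */
    }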
 
@@ -920,6 +1137,9 @@ int enic_probe(struct enic *enic)
 		goto err_out;
 	}
 
+	LIST_INIT(&enic->memzone_list);
+	rte_spinlock_init(&enic->memzone_list_lock);
+
 	vnic_register_cbacks(enic->vdev,
 		enic_alloc_consistent,
 		enic_free_consistent);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index ebe379dd..b271d340 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -83,6 +83,20 @@ int enic_get_vnic_config(struct enic *enic)
 	GET_CONFIG(intr_timer_usec);
 	GET_CONFIG(loop_tag);
 	GET_CONFIG(num_arfs);
+	GET_CONFIG(max_pkt_size);
+
+	/* max packet size is only defined in newer VIC firmware
+	 * and will be 0 for legacy firmware and VICs
+	 */
+	if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE)
+		enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+	else
+		enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4);
+	if (c->mtu == 0)
+		c->mtu = 1500;
+
+	enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
+					 max_t(u16, ENIC_MIN_MTU, c->mtu));
 
 	c->wq_desc_count =
 		min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -96,21 +110,16 @@ int enic_get_vnic_config(struct enic *enic)
 			c->rq_desc_count));
 	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
 
-	if (c->mtu == 0)
-		c->mtu = 1500;
-	c->mtu = min_t(u16, ENIC_MAX_MTU,
-		max_t(u16, ENIC_MIN_MTU,
-		c->mtu));
-
 	c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
 		vnic_dev_get_intr_coal_timer_max(enic->vdev));
 
 	dev_info(enic_get_dev(enic),
 		"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
-		"wq/rq %d/%d mtu %d\n",
+		"wq/rq %d/%d mtu %d, max mtu:%d\n",
 		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
 		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
-		c->wq_desc_count, c->rq_desc_count, c->mtu);
+		c->wq_desc_count, c->rq_desc_count,
+		enic->rte_dev->data->mtu, enic->max_mtu);
 	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
 		"rss %s intr mode %s type %s timer %d usec "
 		"loopback tag 0x%04x\n",
@@ -196,8 +205,9 @@ void enic_free_vnic_resources(struct enic *enic)
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_free(&enic->wq[i]);
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_free(&enic->rq[i]);
+	for (i = 0; i < enic_vnic_rq_count(enic); i++)
+		if (enic->rq[i].in_use)
+			vnic_rq_free(&enic->rq[i]);
 	for (i = 0; i < enic->cq_count; i++)
 		vnic_cq_free(&enic->cq[i]);
 	vnic_intr_free(&enic->intr);
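
A worked example of the max-MTU derivation in the enic_get_vnic_config() hunk above, taking the legacy-firmware case (max_pkt_size reported as 0, so the 9022-byte default applies); the 4 bytes subtracted beyond the Ethernet header are presumably headroom for an inserted VLAN tag:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t max_pkt_size = 9022;	/* ENIC_DEFAULT_MAX_PKT_SIZE */
	uint16_t ether_hdr_len = 14;	/* ETHER_HDR_LEN */
	uint16_t max_mtu = max_pkt_size - (ether_hdr_len + 4);

	printf("max_mtu = %u\n", max_mtu);	/* prints 9004 */
	return 0;
}
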
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 00fa71de..303530ef 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -46,95 +46,19 @@
 #define ENIC_MAX_RQ_DESCS		4096
 
 #define ENIC_MIN_MTU			68
-#define ENIC_MAX_MTU			9000
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_DEFAULT_MAX_PKT_SIZE	9022
 
 #define ENIC_MULTICAST_PERFECT_FILTERS	32
 #define ENIC_UNICAST_PERFECT_FILTERS	32
 
 #define ENIC_NON_TSO_MAX_DESC		16
 #define ENIC_DEFAULT_RX_FREE_THRESH	32
-#define ENIC_TX_POST_THRESH		(ENIC_MIN_WQ_DESCS / 2)
+#define ENIC_TX_XMIT_MAX		64
 
 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
 
-static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len,
-	unsigned int mss_or_csum_offset, unsigned int hdr_len,
-	int vlan_tag_insert, unsigned int vlan_tag,
-	int offload_mode, int cq_entry, int sop, int eop, int loopback)
-{
-	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-	u8 desc_skip_cnt = 1;
-	u8 compressed_send = 0;
-	u64 wrid = 0;
-
-	wq_enet_desc_enc(desc,
-		(u64)dma_addr | VNIC_PADDR_TARGET,
-		(u16)len,
-		(u16)mss_or_csum_offset,
-		(u16)hdr_len, (u8)offload_mode,
-		(u8)eop, (u8)cq_entry,
-		0, /* fcoe_encap */
-		(u8)vlan_tag_insert,
-		(u16)vlan_tag,
-		(u8)loopback);
-
-	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
-		(u8)cq_entry, compressed_send, wrid);
-}
-
-static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len,
-	int eop, int loopback)
-{
-	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-		0, 0, 0, 0, 0,
-		eop, 0 /* !SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
-	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop, int loopback)
-{
-	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-		0, 0, vlan_tag_insert, vlan_tag,
-		WQ_ENET_OFFLOAD_MODE_CSUM,
-		eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len,
-	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop, int loopback)
-{
-	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
-		0, vlan_tag_insert, vlan_tag,
-		WQ_ENET_OFFLOAD_MODE_CSUM,
-		eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len,
-	unsigned int csum_offset, unsigned int hdr_len,
-	int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
-{
-	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
-		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
-		eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len,
-	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop, int loopback)
-{
-	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-		mss, hdr_len, vlan_tag_insert, vlan_tag,
-		WQ_ENET_OFFLOAD_MODE_TSO,
-		eop, 1 /* SOP */, eop, loopback);
-}
 
 struct enic;
diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
deleted file mode 100644
index 232987a5..00000000
--- a/drivers/net/enic/enic_rx.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
- * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_prefetch.h>
-
-#include "enic_compat.h"
-#include "rq_enet_desc.h"
-#include "enic.h"
-
-#define RTE_PMD_USE_PREFETCH
-
-#ifdef RTE_PMD_USE_PREFETCH
-/*
- * Prefetch a cache line into all cache levels.
- */
-#define rte_enic_prefetch(p) rte_prefetch0(p)
-#else
-#define rte_enic_prefetch(p) do {} while (0)
-#endif
-
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p) rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p) do {} while (0)
-#endif
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
-	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
-	return(le16_to_cpu(crd->bytes_written_flags) &
-		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
-	return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
-	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-		== CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
-	return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
-	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
-	return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
-	return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	return le16_to_cpu(cqrd->bytes_written_flags) &
-		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t bwflags;
-	int ret = 0;
-	uint64_t pkt_err_flags = 0;
-
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-	if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-		pkt_err_flags = PKT_RX_MAC_ERR;
-		ret = 1;
-	}
-	*pkt_err_flags_out = pkt_err_flags;
-	return ret;
-}
-
-/*
- * Lookup table to translate RX CQ flags to mbuf flags.
- */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint8_t cqrd_flags = cqrd->flags;
-	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-		[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-			| RTE_PTYPE_L4_UDP,
-		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-			| RTE_PTYPE_L4_TCP,
-		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-			| RTE_PTYPE_L4_FRAG,
-		[16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-		[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-			| RTE_PTYPE_L4_UDP,
-		[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-			| RTE_PTYPE_L4_TCP,
-		[80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-			| RTE_PTYPE_L4_FRAG,
-		/* All others reserved */
-	};
-	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-	return cq_type_table[cqrd_flags];
-}
-
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t ciflags, bwflags, pkt_flags = 0;
-	ciflags = enic_cq_rx_desc_ciflags(cqrd);
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-
-	mbuf->ol_flags = 0;
-
-	/* flags are meaningless if !EOP */
-	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-		goto mbuf_flags_done;
-
-	/* VLAN stripping */
-	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-		pkt_flags |= PKT_RX_VLAN_PKT;
-		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-	} else {
-		mbuf->vlan_tci = 0;
-	}
-
-	/* RSS flag */
-	if (enic_cq_rx_desc_rss_type(cqrd)) {
-		pkt_flags |= PKT_RX_RSS_HASH;
-		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-	}
-
-	/* checksum flags */
-	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-		(mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-		}
-	}
-
- mbuf_flags_done:
-	mbuf->ol_flags = pkt_flags;
-}
-
-static inline uint32_t
-enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-{
-	uint32_t d = i0 + i1;
-	ASSERT(i0 < n_descriptors);
-	ASSERT(i1 < n_descriptors);
-	d -= (d >= n_descriptors) ? n_descriptors : 0;
-	return d;
-}
-
-
-uint16_t
-enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	       uint16_t nb_pkts)
-{
-	struct vnic_rq *rq = rx_queue;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	unsigned int rx_id;
-	struct rte_mbuf *nmb, *rxmb;
-	uint16_t nb_rx = 0;
-	uint16_t nb_hold;
-	struct vnic_cq *cq;
-	volatile struct cq_desc *cqd_ptr;
-	uint8_t color;
-
-	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
-	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-
-	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */
-
-	while (nb_rx < nb_pkts) {
-		volatile struct rq_enet_desc *rqd_ptr;
-		dma_addr_t dma_addr;
-		struct cq_desc cqd;
-		uint64_t ol_err_flags;
-		uint8_t packet_error;
-
-		/* Check for pkts available */
-		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-			& CQ_DESC_COLOR_MASK;
-		if (color == cq->last_color)
-			break;
-
-		/* Get the cq descriptor and rq pointer */
-		cqd = *cqd_ptr;
-		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-
-		/* allocate a new mbuf */
-		nmb = rte_rxmbuf_alloc(rq->mp);
-		if (nmb == NULL) {
-			dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-			enic->port_id, (unsigned)rq->index);
-			rte_eth_devices[enic->port_id].
-					data->rx_mbuf_alloc_failed++;
-			break;
-		}
-
-		/* A packet error means descriptor and data are untrusted */
-		packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-
-		/* Get the mbuf to return and replace with one just allocated */
-		rxmb = rq->mbuf_ring[rx_id];
-		rq->mbuf_ring[rx_id] = nmb;
-
-		/* Increment cqd, rqd, mbuf_table index */
-		rx_id++;
-		if (unlikely(rx_id == rq->ring.desc_count)) {
-			rx_id = 0;
-			cq->last_color = cq->last_color ? 0 : 1;
-		}
-
-		/* Prefetch next mbuf & desc while processing current one */
-		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-		rte_enic_prefetch(cqd_ptr);
-		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-				 + rx_id);
-
-		/* Push descriptor for newly allocated mbuf */
-		dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
-		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
-
-		/* Fill in the rest of the mbuf */
-		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-		rxmb->nb_segs = 1;
-		rxmb->next = NULL;
-		rxmb->port = enic->port_id;
-		if (!packet_error) {
-			rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-			rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-			enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-		} else {
-			rxmb->pkt_len = 0;
-			rxmb->packet_type = 0;
-			rxmb->ol_flags = 0;
-		}
-		rxmb->data_len = rxmb->pkt_len;
-
-		/* prefetch mbuf data for caller */
-		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-		    RTE_PKTMBUF_HEADROOM));
-
-		/* store the mbuf address into the next entry of the array */
-		rx_pkts[nb_rx++] = rxmb;
-	}
-
-	nb_hold += nb_rx;
-	cq->to_clean = rx_id;
-
-	if (nb_hold > rq->rx_free_thresh) {
-		rq->posted_index = enic_ring_add(rq->ring.desc_count,
-				rq->posted_index, nb_hold);
-		nb_hold = 0;
-		rte_mb();
-		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-	}
-
-	rq->rx_nb_hold = nb_hold;
-
-	return nb_rx;
-}
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
new file mode 100644
index 00000000..5ac1d69c
--- /dev/null
+++ b/drivers/net/enic/enic_rxtx.c
@@ -0,0 +1,547 @@
+/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+/* Prefetch a cache line into all cache levels. */
+#define rte_enic_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_enic_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+	return le16_to_cpu(crd->bytes_written_flags) &
+	       ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+	       CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+		== CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+	       CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+	       CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+	return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+	return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	return le16_to_cpu(cqrd->bytes_written_flags) &
+	       CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_check_err(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t bwflags;
+
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
+		return 1;
+	return 0;
+}
+
+/* Lookup table to translate RX CQ flags to mbuf flags. */
+static inline uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint8_t cqrd_flags = cqrd->flags;
+	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+		[0x00] = RTE_PTYPE_UNKNOWN,
+		[0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_NONFRAG,
+		[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_UDP,
+		[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_TCP,
+		[0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_FRAG,
+		[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_UDP,
+		[0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_TCP,
+		[0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_NONFRAG,
+		[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_UDP,
+		[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_TCP,
+		[0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_FRAG,
+		[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_UDP,
+		[0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+			 | RTE_PTYPE_L4_TCP,
+		/* All others reserved */
+	};
+	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+	return cq_type_table[cqrd_flags];
+}
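
The cq_type_table above is indexed directly by the masked CQ flags byte, so each populated slot corresponds to one combination of protocol flag bits. The individual flag values can be read off the entries; a small self-contained check (the EX_FLAG_* values are inferred from the table, the authoritative CQ_ENET_RQ_DESC_FLAGS_* masks live in cq_enet_desc.h):

#include <stdio.h>
#include <stdint.h>

#define EX_FLAG_UDP		0x02
#define EX_FLAG_TCP		0x04
#define EX_FLAG_IPV6		0x10
#define EX_FLAG_IPV4		0x20
#define EX_FLAG_IPV4_FRAG	0x40

int main(void)
{
	uint8_t idx = EX_FLAG_IPV4 | EX_FLAG_TCP;

	/* 0x24: the entry mapped above to L2_ETHER | L3_IPV4_EXT_UNKNOWN
	 * | L4_TCP
	 */
	printf("cq_type_table index = 0x%02x\n", idx);
	return 0;
}
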
+
+static inline void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t ciflags, bwflags, pkt_flags = 0;
+	ciflags = enic_cq_rx_desc_ciflags(cqrd);
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+	mbuf->ol_flags = 0;
+
+	/* flags are meaningless if !EOP */
+	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
+		goto mbuf_flags_done;
+
+	/* VLAN stripping */
+	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+	} else {
+		mbuf->vlan_tci = 0;
+	}
+
+	/* RSS flag */
+	if (enic_cq_rx_desc_rss_type(cqrd)) {
+		pkt_flags |= PKT_RX_RSS_HASH;
+		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+	}
+
+	/* checksum flags */
+	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
+	    (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
+		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
+			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
+				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+		}
+	}
+
+ mbuf_flags_done:
+	mbuf->ol_flags = pkt_flags;
+}
+
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	       uint16_t nb_pkts)
+{
+	struct vnic_rq *sop_rq = rx_queue;
+	struct vnic_rq *data_rq;
+	struct vnic_rq *rq;
+	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
+	uint16_t cq_idx;
+	uint16_t rq_idx;
+	uint16_t rq_num;
+	struct rte_mbuf *nmb, *rxmb;
+	uint16_t nb_rx = 0;
+	struct vnic_cq *cq;
+	volatile struct cq_desc *cqd_ptr;
+	uint8_t color;
+	uint16_t seg_length;
+	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+
+	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
+	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+
+	data_rq = &enic->rq[sop_rq->data_queue_idx];
+
+	while (nb_rx < nb_pkts) {
+		volatile struct rq_enet_desc *rqd_ptr;
+		dma_addr_t dma_addr;
+		struct cq_desc cqd;
+		uint8_t packet_error;
+		uint16_t ciflags;
+
+		/* Check for pkts available */
+		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
+			& CQ_DESC_COLOR_MASK;
+		if (color == cq->last_color)
+			break;
+
+		/* Get the cq descriptor and extract rq info from it */
+		cqd = *cqd_ptr;
+		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+
+		rq = &enic->rq[rq_num];
+		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
+
+		/* allocate a new mbuf */
+		nmb = rte_mbuf_raw_alloc(rq->mp);
+		if (nmb == NULL) {
+			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
+			break;
+		}
+
+		/* A packet error means descriptor and data are untrusted */
+		packet_error = enic_cq_rx_check_err(&cqd);
+
+		/* Get the mbuf to return and replace with one just allocated */
+		rxmb = rq->mbuf_ring[rq_idx];
+		rq->mbuf_ring[rq_idx] = nmb;
+
+		/* Increment cqd, rqd, mbuf_table index */
+		cq_idx++;
+		if (unlikely(cq_idx == cq->ring.desc_count)) {
+			cq_idx = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		/* Prefetch next mbuf & desc while processing current one */
+		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+		rte_enic_prefetch(cqd_ptr);
+
+		ciflags = enic_cq_rx_desc_ciflags(
+			(struct cq_enet_rq_desc *)&cqd);
+
+		/* Push descriptor for newly allocated mbuf */
+		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
+					RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd_ptr, dma_addr,
+				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				: RQ_ENET_TYPE_NOT_SOP),
+				nmb->buf_len - RTE_PKTMBUF_HEADROOM);
+
+		/* Fill in the rest of the mbuf */
+		seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+
+		if (rq->is_sop) {
+			first_seg = rxmb;
+			first_seg->nb_segs = 1;
+			first_seg->pkt_len = seg_length;
+		} else {
+			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+							+ seg_length);
+			first_seg->nb_segs++;
+			last_seg->next = rxmb;
+		}
+
+		rxmb->next = NULL;
+		rxmb->port = enic->port_id;
+		rxmb->data_len = seg_length;
+
+		rq->rx_nb_hold++;
+
+		if (!(enic_cq_rx_desc_eop(ciflags))) {
+			last_seg = rxmb;
+			continue;
+		}
+
+		/* cq rx flags are only valid if eop bit is set */
+		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+		enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+
+		if (unlikely(packet_error)) {
+			rte_pktmbuf_free(first_seg);
+			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+			continue;
+		}
+
+
+		/* prefetch mbuf data for caller */
+		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
+				    RTE_PKTMBUF_HEADROOM));
+
+		/* store the mbuf address into the next entry of the array */
+		rx_pkts[nb_rx++] = first_seg;
+	}
+
+	sop_rq->pkt_first_seg = first_seg;
+	sop_rq->pkt_last_seg = last_seg;
+
+	cq->to_clean = cq_idx;
+
+	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
+	    sop_rq->rx_free_thresh) {
+		if (data_rq->in_use) {
+			data_rq->posted_index =
+				enic_ring_add(data_rq->ring.desc_count,
+					      data_rq->posted_index,
+					      data_rq->rx_nb_hold);
+			data_rq->rx_nb_hold = 0;
+		}
+		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+						     sop_rq->posted_index,
+						     sop_rq->rx_nb_hold);
+		sop_rq->rx_nb_hold = 0;
+
+		rte_mb();
+		if (data_rq->in_use)
+			iowrite32(data_rq->posted_index,
+				  &data_rq->ctrl->posted_index);
+		rte_compiler_barrier();
+		iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
+	}
+
+
+	return nb_rx;
+}
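
From an application, enic_recv_pkts() above is reached through rte_eth_rx_burst(); with scatter enabled, a packet larger than one buffer arrives as the mbuf chain built above (nb_segs > 1). A minimal polling sketch; example_rx_loop is a hypothetical helper, not part of the patch:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EXAMPLE_BURST 32	/* hypothetical burst size */

static void example_rx_loop(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	uint16_t i, n;

	for (;;) {
		n = rte_eth_rx_burst(port_id, queue_id, pkts, EXAMPLE_BURST);
		for (i = 0; i < n; i++)
			rte_pktmbuf_free(pkts[i]);	/* consume packet */
	}
}
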
+
+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+	struct vnic_wq_buf *buf;
+	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+	unsigned int nb_to_free, nb_free = 0, i;
+	struct rte_mempool *pool;
+	unsigned int tail_idx;
+	unsigned int desc_count = wq->ring.desc_count;
+
+	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+				   + 1;
+	tail_idx = wq->tail_idx;
+	buf = &wq->bufs[tail_idx];
+	pool = ((struct rte_mbuf *)buf->mb)->pool;
+	for (i = 0; i < nb_to_free; i++) {
+		buf = &wq->bufs[tail_idx];
+		m = (struct rte_mbuf *)(buf->mb);
+		if (likely(m->pool == pool)) {
+			ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
+			free[nb_free++] = m;
+		} else {
+			rte_mempool_put_bulk(pool, (void *)free, nb_free);
+			free[0] = m;
+			nb_free = 1;
+			pool = m->pool;
+		}
+		tail_idx = enic_ring_incr(desc_count, tail_idx);
+		buf->mb = NULL;
+	}
+
+	rte_mempool_put_bulk(pool, (void **)free, nb_free);
+
+	wq->tail_idx = tail_idx;
+	wq->ring.desc_avail += nb_to_free;
+}
+
+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
+{
+	u16 completed_index;
+
+	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
+
+	if (wq->last_completed_index != completed_index) {
+		enic_free_wq_bufs(wq, completed_index);
+		wq->last_completed_index = completed_index;
+	}
+	return 0;
+}
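
enic_cleanup_wq() above and enic_xmit_pkts() below lean on the ring-index helpers enic_ring_sub() and enic_ring_incr() from enic.h, which this diff does not show. Hypothetical stand-ins that make the modular arithmetic explicit, with semantics inferred from the call sites:

/* Forward distance from index i0 to index i1 on a ring of n slots
 * (hypothetical stand-in for enic_ring_sub())
 */
static inline unsigned int example_ring_sub(unsigned int n, unsigned int i0,
					    unsigned int i1)
{
	return (i1 - i0 + n) % n;
}

/* Advance an index by one slot, wrapping at the end of the ring
 * (hypothetical stand-in for enic_ring_incr())
 */
static inline unsigned int example_ring_incr(unsigned int n, unsigned int idx)
{
	return (idx + 1 == n) ? 0 : idx + 1;
}
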
+
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+	uint16_t nb_pkts)
+{
+	uint16_t index;
+	unsigned int pkt_len, data_len;
+	unsigned int nb_segs;
+	struct rte_mbuf *tx_pkt;
+	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+	unsigned short vlan_id;
+	uint64_t ol_flags;
+	uint64_t ol_flags_mask;
+	unsigned int wq_desc_avail;
+	int head_idx;
+	struct vnic_wq_buf *buf;
+	unsigned int desc_count;
+	struct wq_enet_desc *descs, *desc_p, desc_tmp;
+	uint16_t mss;
+	uint8_t vlan_tag_insert;
+	uint8_t eop;
+	uint64_t bus_addr;
+
+	enic_cleanup_wq(enic, wq);
+	wq_desc_avail = vnic_wq_desc_avail(wq);
+	head_idx = wq->head_idx;
+	desc_count = wq->ring.desc_count;
+	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+
+	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
+
+	for (index = 0; index < nb_pkts; index++) {
+		tx_pkt = *tx_pkts++;
+		nb_segs = tx_pkt->nb_segs;
+		if (nb_segs > wq_desc_avail) {
+			if (index > 0)
+				goto post;
+			goto done;
+		}
+
+		pkt_len = tx_pkt->pkt_len;
+		data_len = tx_pkt->data_len;
+		ol_flags = tx_pkt->ol_flags;
+		mss = 0;
+		vlan_id = 0;
+		vlan_tag_insert = 0;
+		bus_addr = (dma_addr_t)
+			   (tx_pkt->buf_physaddr + tx_pkt->data_off);
+
+		descs = (struct wq_enet_desc *)wq->ring.descs;
+		desc_p = descs + head_idx;
+
+		eop = (data_len == pkt_len);
+
+		if (ol_flags & ol_flags_mask) {
+			if (ol_flags & PKT_TX_VLAN_PKT) {
+				vlan_tag_insert = 1;
+				vlan_id = tx_pkt->vlan_tci;
+			}
+
+			if (ol_flags & PKT_TX_IP_CKSUM)
+				mss |= ENIC_CALC_IP_CKSUM;
+
+			/* Nic uses just 1 bit for UDP and TCP */
+			switch (ol_flags & PKT_TX_L4_MASK) {
+			case PKT_TX_TCP_CKSUM:
+			case PKT_TX_UDP_CKSUM:
+				mss |= ENIC_CALC_TCP_UDP_CKSUM;
+				break;
+			}
+		}
+
+		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
+				 eop, 0, vlan_tag_insert, vlan_id, 0);
+
+		*desc_p = desc_tmp;
+		buf = &wq->bufs[head_idx];
+		buf->mb = (void *)tx_pkt;
+		head_idx = enic_ring_incr(desc_count, head_idx);
+		wq_desc_avail--;
+
+		if (!eop) {
+			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
+			    tx_pkt->next) {
+				data_len = tx_pkt->data_len;
+
+				if (tx_pkt->next == NULL)
+					eop = 1;
+				desc_p = descs + head_idx;
+				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+					   + tx_pkt->data_off);
+				wq_enet_desc_enc((struct wq_enet_desc *)
+						 &desc_tmp, bus_addr, data_len,
+						 mss, 0, 0, eop, eop, 0,
+						 vlan_tag_insert, vlan_id, 0);
+
+				*desc_p = desc_tmp;
+				buf = &wq->bufs[head_idx];
+				buf->mb = (void *)tx_pkt;
+				head_idx = enic_ring_incr(desc_count,
+							  head_idx);
+				wq_desc_avail--;
+			}
+		}
+	}
+ post:
+	rte_wmb();
+	iowrite32(head_idx, &wq->ctrl->posted_index);
+ done:
+	wq->ring.desc_avail = wq_desc_avail;
+	wq->head_idx = head_idx;
+
+	return index;
+}
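
enic_xmit_pkts() caps each call at ENIC_TX_XMIT_MAX (64) packets and returns early when WQ descriptors run out, so callers should loop on the return value. A usage sketch through the standard burst API; example_tx_all is a hypothetical helper and will spin while the queue stays full:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void example_tx_all(uint8_t port_id, uint16_t queue_id,
			   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, n - sent);
}
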